| code (string, length 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""Convert an LXMERT TensorFlow checkpoint to a PyTorch model."""
import argparse

import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the JSON configuration
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
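
# Illustrative invocation only: the paths below are hypothetical placeholders,
# not files shipped with this script; substitute your own checkpoint locations.
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="./lxmert_tf/model.ckpt",
#     config_file="./lxmert_tf/config.json",
#     pytorch_dump_path="./lxmert_pt/pytorch_model.bin",
# )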
"""VAE building blocks: Encoder, Decoder, VectorQuantizer and DiagonalGaussianDistribution."""
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step: the decoded batch of images."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps continuous latents to the nearest codebook entries."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random", "extra" or an integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None) -> torch.FloatTensor:
        # make sure sample is on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
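

# A minimal self-contained sketch (not part of the module above) of the two core
# tricks used here: the straight-through estimator in VectorQuantizer.forward,
# and the reparameterised sample in DiagonalGaussianDistribution.
if __name__ == "__main__":
    z = torch.randn(2, 4, 8, 8, requires_grad=True)

    # Straight-through: the forward value equals the quantised tensor, but the
    # gradient flows back to z as if quantisation were the identity.
    z_q = torch.round(z)  # stand-in for a codebook lookup
    z_st = z + (z_q - z).detach()
    z_st.sum().backward()
    assert torch.allclose(z.grad, torch.ones_like(z))

    # Reparameterisation: mean + std * eps keeps the sample differentiable.
    params = torch.randn(2, 8, 8, 8)  # first half of dim 1 is mean, second half logvar
    posterior = DiagonalGaussianDistribution(params)
    assert posterior.sample().shape == posterior.mean.shape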
"""simple docstring"""
def lowercase__ ( lowercase_ = 10 ) -> List[str]:
"""simple docstring"""
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ) or n < 0:
raise ValueError("Invalid input" )
_UpperCamelCase : List[str] = 10**n
_UpperCamelCase : Tuple = 28_433 * (pow(2 ,7_830_457 ,lowerCamelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""")
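
# Sanity check of the three-argument pow used above (runs at import time,
# negligible cost): it computes (base ** exp) % mod without ever
# materialising the full power.
assert pow(2, 10, 1000) == 24  # 2**10 == 1024 and 1024 % 1000 == 24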
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
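
# Quick illustration of the lamina identity the loop relies on: an outer square
# of width w with a centred hole of width h (same parity) uses w*w - h*h tiles,
# e.g. w=3, h=1 gives the familiar 8-tile "picture frame".
assert 3 * 3 - 1 * 1 == 8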
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the model documentation table of content by removing duplicate entries and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
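
# A small illustration of `clean_model_doc_toc` on assumed data (not the real
# toctree): exact duplicates collapse to one entry and the result is sorted by
# title, case-insensitively.
# >>> clean_model_doc_toc([
# ...     {"local": "model_doc/bert", "title": "BERT"},
# ...     {"local": "model_doc/albert", "title": "ALBERT"},
# ...     {"local": "model_doc/bert", "title": "BERT"},
# ... ])
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]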
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
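
# A short usage sketch (network access required; token values depend on the
# actual checkpoint, so none are shown here):
# tokenizer = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# # input_ids end with [..., eos_token_id, en_XX code], per set_src_lang_special_tokens above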
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
"""simple docstring"""
import argparse
from collections import defaultdict
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : Tuple = F'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(_lowerCamelCase, "r" ) as f:
__lowercase : Union[str, Any] = f.readlines()
__lowercase : str = F'class {class_name}('
__lowercase : Union[str, Any] = F'{4 * " "}def {test_name}('
__lowercase : List[Any] = F'{8 * " "}{correct_line.split()[0]}'
__lowercase : Optional[int] = F'{16 * " "}{correct_line.split()[0]}'
__lowercase : List[Any] = False
__lowercase : int = False
__lowercase : int = False
__lowercase : List[Any] = False
__lowercase : str = 0
__lowercase : Dict = 0
__lowercase : Optional[int] = []
for line in lines:
if line.startswith(_lowerCamelCase ):
__lowercase : Optional[int] = True
elif in_class and line.startswith(_lowerCamelCase ):
__lowercase : Optional[int] = True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
__lowercase : int = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__lowercase : Any = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__lowercase : int = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'{spaces * " "}{correct_line}' )
__lowercase : List[Any] = False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase, "w" ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase=None ) ->List[Any]:
"""simple docstring"""
if fail is not None:
with open(_lowerCamelCase, "r" ) as f:
__lowercase : str = {l.strip() for l in f.readlines()}
else:
__lowercase : List[Any] = None
with open(_lowerCamelCase, "r" ) as f:
__lowercase : str = f.readlines()
__lowercase : List[str] = defaultdict(_lowerCamelCase )
for line in correct_lines:
__lowercase ,__lowercase ,__lowercase ,__lowercase : str = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
__A : Union[str, Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
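
# Expected input format, reconstructed from the `line.split(";")` above (the
# path and values are hypothetical): each line of --correct_filename holds
# four semicolon-separated fields,
#   file;class_name;test_name;corrected source line
# e.g.
#   tests/test_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])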
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :float , a_ :float , a_ :float , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''')
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''')
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
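
# Worked example of the mass-action law n * p == n_i**2 implemented above:
# with electron_conc=25 and hole_conc=100, the intrinsic concentration is
# sqrt(25 * 100) = 50.
assert carrier_concentration(25, 100, 0) == ("intrinsic_conc", 50.0)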
"""Tests for the ALBERT tokenizers (slow and fast), using a SentencePiece fixture."""
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_0000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
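
# How the lazy module behaves in practice (a sketch, not part of the init):
# importing the package itself is cheap, because heavy submodules listed in
# _import_structure are only imported on first attribute access, e.g.
# from transformers.models.reformer import ReformerConfig  # eager, tiny
# from transformers.models.reformer import ReformerModel   # resolved lazily via _LazyModule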
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the workflow run id of the last completed scheduled (daily) CI run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed scheduled (daily) CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword expected by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the requested artifacts of the last completed scheduled (daily) CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
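
# Hypothetical invocation (the artifact name and environment variable are
# placeholders, not values this script defines):
# results = get_last_daily_ci_reports(
#     artifact_names=["run_all_tests_gpu_test_reports"],
#     output_dir="ci_artifacts",
#     token=os.environ.get("GITHUB_TOKEN"),
# )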
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
def __init__( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : List[Any]=99 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Optional[Any]=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : List[Any]="last" , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=0 , ):
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : int = batch_size
UpperCamelCase__ : List[Any] = seq_length
UpperCamelCase__ : Optional[Any] = is_training
UpperCamelCase__ : List[str] = use_input_lengths
UpperCamelCase__ : List[Any] = use_token_type_ids
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : Tuple = gelu_activation
UpperCamelCase__ : Union[str, Any] = sinusoidal_embeddings
UpperCamelCase__ : Dict = causal
UpperCamelCase__ : Dict = asm
UpperCamelCase__ : List[Any] = n_langs
UpperCamelCase__ : Dict = vocab_size
UpperCamelCase__ : Optional[Any] = n_special
UpperCamelCase__ : Tuple = hidden_size
UpperCamelCase__ : List[Any] = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : int = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : Any = max_position_embeddings
UpperCamelCase__ : List[str] = type_sequence_label_size
UpperCamelCase__ : Tuple = initializer_range
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : List[str] = num_choices
UpperCamelCase__ : str = summary_type
UpperCamelCase__ : Optional[int] = use_proj
UpperCamelCase__ : Optional[int] = scope
UpperCamelCase__ : Tuple = bos_token_id
def __UpperCamelCase ( self : Any):
UpperCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCamelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length])
UpperCamelCase__ : Optional[int] = None
if self.use_input_lengths:
UpperCamelCase__ : int = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ : List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : Optional[Any] = None
if self.use_labels:
UpperCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , 2).float()
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices)
UpperCamelCase__ : Union[str, Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self : int):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , ):
UpperCamelCase__ : Optional[int] = XLMModel(config=SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , lengths=SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : List[Any] = model(SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , ):
UpperCamelCase__ : Union[str, Any] = XLMWithLMHeadModel(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , ):
UpperCamelCase__ : int = XLMForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Tuple = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCamelCase ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , ):
UpperCamelCase__ : List[Any] = XLMForQuestionAnswering(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : Any = model(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : int = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , p_mask=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase__ : List[str] = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , )
((UpperCamelCase__ ), ) : Tuple = result_with_labels.to_tuple()
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_)
((UpperCamelCase__ ), ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def __UpperCamelCase ( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , ):
UpperCamelCase__ : Tuple = XLMForSequenceClassification(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __UpperCamelCase ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , ):
UpperCamelCase__ : Dict = self.num_labels
UpperCamelCase__ : Any = XLMForTokenClassification(SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , ):
UpperCamelCase__ : Optional[int] = self.num_choices
UpperCamelCase__ : Tuple = XLMForMultipleChoice(config=SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
UpperCamelCase__ : str = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : Optional[int] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCamelCase__ : int = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCamelCase ( self : int):
UpperCamelCase__ : int = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
),
) : Dict = config_and_inputs
UpperCamelCase__ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class __lowercase (__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
_lowerCamelCase = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict=False):
UpperCamelCase__ : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCamelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_)
UpperCamelCase__ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_)
return inputs_dict
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Tuple = XLMModelTester(self)
UpperCamelCase__ : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , emb_dim=37)
def __UpperCamelCase ( self : int):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Optional[int]):
UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*SCREAMING_SNAKE_CASE_)
def __UpperCamelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Optional[int]=1):
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for iter_attentions in attentions] , [True] * len(SCREAMING_SNAKE_CASE_))
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(SCREAMING_SNAKE_CASE_):
# adds PAD dummy token
UpperCamelCase__ : Union[str, Any] = min_length + idx + 1
UpperCamelCase__ : str = min_length + idx + 1
UpperCamelCase__ : List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(SCREAMING_SNAKE_CASE_))
def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : int=1):
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for iter_hidden_states in hidden_states] , [True] * len(SCREAMING_SNAKE_CASE_) , )
self.assertEqual(len(SCREAMING_SNAKE_CASE_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(SCREAMING_SNAKE_CASE_):
# adds PAD dummy token
UpperCamelCase__ : str = min_length + idx + 1
UpperCamelCase__ : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(SCREAMING_SNAKE_CASE_) , )
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = XLMModel.from_pretrained(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president (repeated ten times)
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.token_classification_task = token_classification_task
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)

        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
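
# A minimal usage sketch (not part of the module; assumes the transformers agents
# tooling is installed and the checkpoint above can be downloaded):
#
#   >>> classifier = TextClassificationTool()
#   >>> classifier("This is a super nice API!", labels=["positive", "negative"])
#   'positive'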
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
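
# A quick sanity sketch (not part of the script) of what the renaming does to a
# hypothetical checkpoint key:
#
#   >>> sd = {"blocks.0.att.key.weight": torch.zeros(1)}
#   >>> list(convert_state_dict(sd).keys())
#   ['rwkv.blocks.0.attention.key.weight']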
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

        # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
        print(
            "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
        )
        shard_files = list(shards.keys())

        del state_dict
        del shards
        gc.collect()

        for shard_file in shard_files:
            state_dict = torch.load(os.path.join(output_dir, shard_file))
            torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
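
# A quick illustration (not in the original module) of the in-place sort:
#
#   >>> data = [8, 3, 2, 7, 4, 6, 8]
#   >>> pigeonhole_sort(data)
#   >>> data
#   [2, 3, 4, 6, 7, 8, 8]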
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
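
# Example invocation (script filename and paths below are placeholders):
#   python convert_xlm_checkpoint.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-mlm-en-2048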
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
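
    # Worked check (not in the original file): for the degree-1 curve through
    # (1, 1) and (3, 3), basis_function(0.5) returns [0.5, 0.5], so
    # bezier_curve_function(0.5) = (0.5 * 1 + 0.5 * 3, 0.5 * 1 + 0.5 * 3) = (2.0, 2.0).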
    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
_lowercase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
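
# Minimal usage sketch (not part of the module; `list_of_pil_frames` is a
# hypothetical list of PIL images for one video):
#
#   >>> processor = VivitImageProcessor()
#   >>> inputs = processor(list_of_pil_frames, return_tensors="pt")
#   >>> inputs["pixel_values"].shape  # (batch, num_frames, channels, height, width)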
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
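# A minimal usage sketch (added example, not part of the original script). The local
# path is a placeholder for whatever --pytorch_dump_folder_path pointed to:
#
#   from transformers import TableTransformerForObjectDetection
#   model = TableTransformerForObjectDetection.from_pretrained("./table-transformer-detection")
#   outputs = model(pixel_values)  # pixel_values shaped (batch, 3, H, W), e.g. from normalize(resize(...))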
| 524
| 0
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__lowerCAmelCase : int ="""\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__lowerCAmelCase : Union[str, Any] ="""\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__lowerCAmelCase : Union[str, Any] ="""
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
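# A minimal sketch (added example) of the input-format note in the description above:
# transposing sacrebleu-style reference lists into the one-sub-list-per-prediction
# layout this metric expects.
if __name__ == "__main__":
    sacrebleu_style = [["ref A, sent 1", "ref A, sent 2"], ["ref B, sent 1", "ref B, sent 2"]]
    per_prediction = [list(refs) for refs in zip(*sacrebleu_style)]
    print(per_prediction)  # [['ref A, sent 1', 'ref B, sent 1'], ['ref A, sent 2', 'ref B, sent 2']]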
| 359
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    result = model.predict(x_test)
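    # A quick evaluation sketch (added example): both arrays are still in the scaled
    # [0, 1] range produced by MinMaxScaler above.
    rmse = np.sqrt(np.mean((result - y_test) ** 2))
    print(f"RMSE on scaled data: {rmse:.4f}")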
| 359
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 30
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
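# A minimal sketch (added example): is_small_dataset compares a size in bytes against
# datasets.config.IN_MEMORY_MAX_SIZE, and a max size of 0 disables in-memory loading.
if __name__ == "__main__":
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # hypothetical 500 MiB cap
    print(is_small_dataset(400 * 2**20))  # True: under the cap
    print(is_small_dataset(600 * 2**20))  # False: over the cap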
| 30
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
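# A toy invocation sketch (added example; values are placeholders):
#
#   tok = AutoTokenizer.from_pretrained("t5-small")
#   src = ["first source line", "second source line", "third source line"]
#   tgt = ["first target", "second target", "third target"]
#   packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=16)
#   # adjacent examples are merged with a space until the token budget is hit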
| 363
|
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url, root="."):  # assumption: a default root is added here so the one-argument call below works
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # named checkpoint: download the raw bytes and load them as a torch checkpoint
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
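# A minimal sketch (added example) of loading a converted checkpoint; the folder
# path is a placeholder for --pytorch_dump_folder_path:
#
#   from transformers import WhisperForConditionalGeneration
#   model = WhisperForConditionalGeneration.from_pretrained("./whisper-small-en")
#   print(model.config.d_model, model.config.encoder_layers)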
| 363
| 1
|
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 133
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num):
    """Return all primes up to num using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
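    # A quick sanity check (added example): prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]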
| 133
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
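# A minimal sketch (added example): the attribute_map above aliases common config
# names onto the decoder-only fields.
if __name__ == "__main__":
    config = Speech2Text2Config()
    print(config.hidden_size == config.d_model)  # True, via attribute_map
    print(config.num_attention_heads == config.decoder_attention_heads)  # True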
| 63
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckp(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class a :
"""simple docstring"""
a : Dict = {}
def __init__( self : Dict , __lowercase : dict , __lowercase : str = "root" , __lowercase : Any=0 ) -> Dict:
__UpperCAmelCase : List[str] = name
__UpperCAmelCase : str = level
__UpperCAmelCase : int = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase : List[str] = copy.deepcopy(__lowercase )
__UpperCAmelCase : Dict = copy.deepcopy(__lowercase )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Union[str, Any] = Config(__lowercase , name=__lowercase , level=level + 1 )
__UpperCAmelCase : Union[str, Any] = v
setattr(self , __lowercase , __lowercase )
__UpperCAmelCase : Any = d
def __repr__( self : Optional[Any] ) -> Optional[int]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self : List[str] , __lowercase : List[str] , __lowercase : Tuple ) -> int:
__UpperCAmelCase : int = val
__UpperCAmelCase : List[str] = val
__UpperCAmelCase : Union[str, Any] = key.split(""".""" )
__UpperCAmelCase : List[Any] = len(__lowercase ) - 1
__UpperCAmelCase : List[Any] = self._pointer
if len(__lowercase ) > 1:
for i, l in enumerate(__lowercase ):
if hasattr(self , __lowercase ) and isinstance(getattr(self , __lowercase ) , __lowercase ):
setattr(getattr(self , __lowercase ) , """.""".join(levels[i:] ) , __lowercase )
if l == last_level:
__UpperCAmelCase : Union[str, Any] = val
else:
__UpperCAmelCase : Union[str, Any] = pointer[l]
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return self._pointer
def UpperCAmelCase ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]:
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__lowercase , __lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Any:
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__lowercase , __lowercase )
@staticmethod
def UpperCAmelCase ( __lowercase : List[Any] ) -> Optional[Any]:
with open(__lowercase ) as stream:
__UpperCAmelCase : Any = load(__lowercase , Loader=__lowercase )
return data
def __str__( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = """ """
if self._name != "root":
__UpperCAmelCase : Optional[Any] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
__UpperCAmelCase : List[Any] = """"""
__UpperCAmelCase : Optional[Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__lowercase , __lowercase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__lowercase ).__name__})\n"""
__UpperCAmelCase : int = level
return r[:-1]
@classmethod
def UpperCAmelCase ( cls : List[str] , __lowercase : str , **__lowercase : Any ) -> Any:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = cls.get_config_dict(__lowercase , **__lowercase )
return cls(__lowercase )
@classmethod
def UpperCAmelCase ( cls : Dict , __lowercase : str , **__lowercase : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : int = kwargs.pop("""cache_dir""" , __lowercase )
__UpperCAmelCase : int = kwargs.pop("""force_download""" , __lowercase )
__UpperCAmelCase : str = kwargs.pop("""resume_download""" , __lowercase )
__UpperCAmelCase : Dict = kwargs.pop("""proxies""" , __lowercase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""local_files_only""" , __lowercase )
if os.path.isdir(__lowercase ):
__UpperCAmelCase : List[Any] = os.path.join(__lowercase , __lowercase )
elif os.path.isfile(__lowercase ) or is_remote_url(__lowercase ):
__UpperCAmelCase : Tuple = pretrained_model_name_or_path
else:
__UpperCAmelCase : Optional[int] = hf_bucket_url(__lowercase , filename=__lowercase , use_cdn=__lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase : Optional[int] = cached_path(
__lowercase , cache_dir=__lowercase , force_download=__lowercase , proxies=__lowercase , resume_download=__lowercase , local_files_only=__lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase : Optional[int] = Config.load_yaml(__lowercase )
except EnvironmentError:
__UpperCAmelCase : str = """Can't load config for"""
raise EnvironmentError(__lowercase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__lowercase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n2.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def lowerCamelCase__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=10 , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Dict=None , __lowerCamelCase : List[str]=False , ):
if cache_dir is None:
__UpperCAmelCase : Optional[Any] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : List[str] = str(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
__UpperCAmelCase : List[Any] = None
if not local_files_only:
try:
__UpperCAmelCase : Optional[Any] = requests.head(__lowerCamelCase , allow_redirects=__lowerCamelCase , proxies=__lowerCamelCase , timeout=__lowerCamelCase )
if response.status_code == 200:
__UpperCAmelCase : Dict = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase : List[str] = url_to_filename(__lowerCamelCase , __lowerCamelCase )
# get cache path to put the file
__UpperCAmelCase : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCamelCase ):
return cache_path
else:
__UpperCAmelCase : List[Any] = [
file
for file in fnmatch.filter(os.listdir(__lowerCamelCase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(__lowerCamelCase ) > 0:
return os.path.join(__lowerCamelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase : str = cache_path + """.lock"""
with FileLock(__lowerCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase : int = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(__lowerCamelCase , """a+b""" ) as f:
yield f
__UpperCAmelCase : str = _resumable_file_manager
if os.path.exists(__lowerCamelCase ):
__UpperCAmelCase : List[Any] = os.stat(__lowerCamelCase ).st_size
else:
__UpperCAmelCase : List[Any] = 0
else:
__UpperCAmelCase : str = partial(tempfile.NamedTemporaryFile , dir=__lowerCamelCase , delete=__lowerCamelCase )
__UpperCAmelCase : Optional[int] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , __lowerCamelCase , temp_file.name , )
http_get(
__lowerCamelCase , __lowerCamelCase , proxies=__lowerCamelCase , resume_size=__lowerCamelCase , user_agent=__lowerCamelCase , )
os.replace(temp_file.name , __lowerCamelCase )
__UpperCAmelCase : Any = {"""url""": url, """etag""": etag}
__UpperCAmelCase : Union[str, Any] = cache_path + """.json"""
with open(__lowerCamelCase , """w""" ) as meta_file:
json.dump(__lowerCamelCase , __lowerCamelCase )
return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : int=False , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=None , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : str=False , ):
if cache_dir is None:
__UpperCAmelCase : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Any = str(__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase ):
__UpperCAmelCase : Tuple = str(__lowerCamelCase )
if is_remote_url(__lowerCamelCase ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase : Tuple = get_from_cache(
__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , proxies=__lowerCamelCase , resume_download=__lowerCamelCase , user_agent=__lowerCamelCase , local_files_only=__lowerCamelCase , )
elif os.path.exists(__lowerCamelCase ):
# File, and it exists.
__UpperCAmelCase : Tuple = url_or_filename
elif urlparse(__lowerCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(__lowerCamelCase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCamelCase ) and not tarfile.is_tarfile(__lowerCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase : int = os.path.split(__lowerCamelCase )
__UpperCAmelCase : Any = output_file.replace(""".""" , """-""" ) + """-extracted"""
__UpperCAmelCase : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ) and os.listdir(__lowerCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase : str = output_path + """.lock"""
with FileLock(__lowerCamelCase ):
shutil.rmtree(__lowerCamelCase , ignore_errors=__lowerCamelCase )
os.makedirs(__lowerCamelCase )
if is_zipfile(__lowerCamelCase ):
with ZipFile(__lowerCamelCase , """r""" ) as zip_file:
zip_file.extractall(__lowerCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCamelCase ):
__UpperCAmelCase : Any = tarfile.open(__lowerCamelCase )
tar_file.extractall(__lowerCamelCase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCamelCase ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
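# A small illustration (added example) of the legacy URL scheme built by
# hf_bucket_url above: model ids without a "/" use "{endpoint}/{model_id}-{filename}".
#
#   hf_bucket_url("bert-base-uncased", filename="config.yaml", use_cdn=False)
#   # -> https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.yaml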
| 63
| 1
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
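# A hypothetical invocation (added example; file name and paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin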
| 709
|
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
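# Note (added): the segmented variant keeps only O(sqrt(n)) booleans in memory per
# segment, unlike a plain sieve's O(n) array, so it scales to much larger n.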
| 588
| 0
|
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
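    # (Added note) For Project Euler problem 19 this prints 171.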
| 270
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
def __init__( self : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : str=[10, 20, 30, 40] , __lowerCamelCase : Any=[2, 2, 3, 2] , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : List[Any]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : str=["stage2", "stage3", "stage4"] , __lowerCamelCase : Optional[Any]=[2, 3, 4] , __lowerCamelCase : Dict=None , ):
snake_case__ : List[str] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : int = image_size
snake_case__ : Tuple = num_channels
snake_case__ : Any = num_stages
snake_case__ : Any = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : Optional[Any] = is_training
snake_case__ : Dict = use_labels
snake_case__ : Any = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Any = num_labels
snake_case__ : Optional[int] = initializer_range
snake_case__ : Union[str, Any] = out_features
snake_case__ : Optional[Any] = out_indices
snake_case__ : Optional[int] = scope
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Any = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Tuple ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCAmelCase ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
snake_case__ : Optional[int] = ConvNextModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Union[str, Any] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self : Any , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] ):
snake_case__ : Optional[Any] = ConvNextForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : Optional[Any] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : int ):
snake_case__ : Dict = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : str = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = ConvNextBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case__ : int = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCAmelCase ( self : Tuple ):
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Optional[Any] = config_and_inputs
snake_case__ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
A_ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
def _lowerCAmelCase ( self : Any ):
snake_case__ : Tuple = ConvNextModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Dict ):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def _lowerCAmelCase ( self : Tuple ):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def _lowerCAmelCase ( self : List[Any] ):
pass
def _lowerCAmelCase ( self : str ):
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(__lowerCamelCase )
snake_case__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[Any] = [*signature.parameters.keys()]
snake_case__ : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCAmelCase ( self : int ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCAmelCase ( self : int ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def _lowerCAmelCase ( self : str ):
def check_hidden_states_output(__lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple ):
snake_case__ : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case__ : int = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
snake_case__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Tuple = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCAmelCase ( self : str ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = ConvNextModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__ ( ) -> Optional[Any]:
snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCAmelCase ( self : List[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : Optional[int] ):
snake_case__ : str = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(__lowerCamelCase )
snake_case__ : str = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : List[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**__lowerCamelCase )
# verify the logits
snake_case__ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
snake_case__ : int = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
A_ = (ConvNextBackbone,) if is_torch_available() else ()
A_ = ConvNextConfig
A_ = False
def _lowerCAmelCase ( self : List[Any] ):
snake_case__ : List[Any] = ConvNextModelTester(self )
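# To run just this suite in a transformers checkout (the path is an assumption
# about the usual repo layout, not something stated above):
#   python -m pytest tests/models/convnext/test_modeling_convnext.py -q
# The @slow integration test additionally needs RUN_SLOW=1 and network access
# to download facebook/convnext-tiny-224.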
| 270
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=7 , UpperCamelCase=3 , UpperCamelCase=18 , UpperCamelCase=30 , UpperCamelCase=4_00 , UpperCamelCase=True , UpperCamelCase=None , UpperCamelCase=True , ):
lowerCamelCase__ = size if size is not None else {"height": 18, "width": 18}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = apply_ocr
def __UpperCAmelCase ( self):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
__lowerCAmelCase : str =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __UpperCAmelCase ( self):
lowerCamelCase__ = LayoutLMvaImageProcessingTester(self)
@property
def __UpperCAmelCase ( self):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self):
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase , "size"))
self.assertTrue(hasattr(UpperCamelCase , "apply_ocr"))
def __UpperCAmelCase ( self):
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 18, "width": 18})
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"height": 42, "width": 42})
def __UpperCAmelCase ( self):
pass
def __UpperCAmelCase ( self):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image)
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , UpperCamelCase)
self.assertIsInstance(encoding.boxes , UpperCamelCase)
# Test batched
lowerCamelCase__ = image_processing(UpperCamelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCAmelCase ( self):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray)
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ = image_processing(UpperCamelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCAmelCase ( self):
# Initialize image_processing
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor)
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCamelCase__ = image_processing(UpperCamelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __UpperCAmelCase ( self):
# with apply_OCR = True
lowerCamelCase__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCamelCase__ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test")
lowerCamelCase__ = Image.open(ds[0]["file"]).convert("RGB")
lowerCamelCase__ = image_processing(UpperCamelCase , return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCamelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowerCamelCase__ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCamelCase)
self.assertListEqual(encoding.boxes , UpperCamelCase)
# with apply_OCR = False
lowerCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase)
lowerCamelCase__ = image_processing(UpperCamelCase , return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
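# Minimal usage sketch of the image processor exercised above. With
# apply_ocr=False no Tesseract install is needed; "page.png" is a placeholder.
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   encoding = processor(Image.open("page.png").convert("RGB"), return_tensors="pt")
#   print(encoding.pixel_values.shape)  # (1, 3, 224, 224) at the default size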
| 426
|
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Any]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Sequence[Any]]:
    """Zigzag (spiral) level-order traversal: alternate direction on each level."""
    if root is None:
        return []
    output: list[Sequence[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
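# Worked example for the sample tree built by make_tree() above
# (1 at the root, 2 and 3 below it, 4 and 5 under the 2):
_root = make_tree()
assert preorder(_root) == [1, 2, 4, 5, 3]
assert inorder(_root) == [4, 2, 5, 1, 3]
assert postorder(_root) == [4, 5, 2, 3, 1]
assert level_order(_root) == [1, 2, 3, 4, 5]
assert zigzag(_root) == [[1], [3, 2], [4, 5]]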
| 426
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
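# With the lazy structure above, e.g. `from transformers.models.mvp import MvpModel`
# defers the heavy torch-dependent import until MvpModel is first accessed;
# _import_structure only declares what each submodule exports.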
| 224
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute base**exponent % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler problem 188: the last `digits` digits of the
    hyperexponentiation (power tower) base^^height."""
    # Evaluate the tower one level at a time, reducing mod 10**digits each step.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
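# Cross-check the helper against Python's built-in three-argument pow()
# (same semantics for exponent >= 1):
assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)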
| 628
| 0
|
from __future__ import annotations


def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
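# Usage example:
assert mean([1, 2, 3, 4]) == 2.5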
| 1
|
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit of num."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for char in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(list(transposition))) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
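# Usage examples: dropping one digit of 152 yields 52, 12 or 15, so the
# maximum is 52; the sign is discarded via abs().
assert remove_digit(152) == 52
assert remove_digit(-290) == 90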
| 1
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
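# Usage sketch (downloads the checkpoint, so network access is assumed):
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   enc = tok("hello world")
#   # For a single sequence, token_type_ids are all zeros, matching
#   # create_token_type_ids_from_sequences above.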
| 406
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to its transformers name."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
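# Example invocation (paths are placeholders; the script name is assumed from
# the function it wraps):
#   python convert_yoso_checkpoint_to_pytorch.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.bin \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path /path/to/output_dir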
| 406
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
(f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
(f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
(f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
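# Example invocation (output folder is a placeholder; the script name is
# assumed from the function it wraps, and the default --checkpoint_url above
# already points at the dit-base weights):
#   python convert_dit_checkpoint.py --pytorch_dump_folder_path ./dit-base --push_to_hub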
| 152
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Optional[int] = '''cpu'''
A_ : Any = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
A_ : Tuple = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
A_ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self )->int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = torch.manual_seed(0 )
A_ : int = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
A_ : Any = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
A_ : Any = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
A_ : List[str] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : List[Any] = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _snake_case ( self )->str:
'''simple docstring'''
A_ : Tuple = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
A_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
A_ : List[str] = ['''vase''', '''umbrella''']
A_ : List[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Tuple = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
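# The fast tests above run on CPU with a tiny randomly initialised
# TransformeraDModel; only the integration class needs a GPU and the
# facebook/DiT-XL-2-* checkpoints (guarded by @require_torch_gpu and @slow).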
| 152
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
def __init__( self :Optional[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Dict=None ,_UpperCamelCase :str=None ,_UpperCamelCase :int=None ,_UpperCamelCase :Dict="resnet50" ,_UpperCamelCase :Any=3 ,_UpperCamelCase :str=3_2 ,_UpperCamelCase :Any=3 ,_UpperCamelCase :str=True ,_UpperCamelCase :Dict=True ,):
snake_case_ : List[Any] = parent
snake_case_ : Union[str, Any] = out_indices if out_indices is not None else [4]
snake_case_ : Any = stage_names
snake_case_ : Optional[Any] = out_features
snake_case_ : Any = backbone
snake_case_ : Optional[Any] = batch_size
snake_case_ : Tuple = image_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Union[str, Any] = use_pretrained_backbone
snake_case_ : str = is_training
def a__ ( self :Optional[Any] ):
snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[str] = self.get_config()
return config, pixel_values
def a__ ( self :Tuple ):
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def a__ ( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :Dict ):
snake_case_ : str = TimmBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 1_4, 1_4) ,)
def a__ ( self :List[Any] ):
snake_case_ : List[Any] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ : str = config_and_inputs
snake_case_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
lowercase : Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
lowercase : Optional[Any] = False
lowercase : List[str] = False
lowercase : List[Any] = False
lowercase : str = False
def a__ ( self :int ):
snake_case_ : int = TimmBackboneModelTester(self )
snake_case_ : str = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,has_text_modality=__SCREAMING_SNAKE_CASE )
def a__ ( self :Union[str, Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self :Optional[Any] ):
snake_case_ : Any = """resnet18"""
snake_case_ : Tuple = """microsoft/resnet-18"""
snake_case_ : str = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,use_timm_backbone=__SCREAMING_SNAKE_CASE )
snake_case_ : int = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
snake_case_ : Optional[Any] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,use_timm_backbone=__SCREAMING_SNAKE_CASE ,out_indices=[1, 2, 3] )
snake_case_ : List[str] = AutoBackbone.from_pretrained(__SCREAMING_SNAKE_CASE ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip("""TimmBackbone doesn\'t support feed forward chunking""" )
def a__ ( self :int ):
pass
@unittest.skip("""TimmBackbone doesn\'t have num_hidden_layers attribute""" )
def a__ ( self :Any ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def a__ ( self :List[str] ):
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def a__ ( self :List[str] ):
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def a__ ( self :Dict ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def a__ ( self :List[str] ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def a__ ( self :Tuple ):
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def a__ ( self :Dict ):
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def a__ ( self :List[Any] ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def a__ ( self :List[str] ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def a__ ( self :Any ):
pass
@unittest.skip("""TimmBackbone doesn\'t have hidden size info in its configuration.""" )
def a__ ( self :Dict ):
pass
@unittest.skip("""TimmBackbone doesn\'t support output_attentions.""" )
def a__ ( self :int ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def a__ ( self :Union[str, Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self :Any ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_and_check_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
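    # Example of running just these backbone checks (a sketch; the test-file
    # path is an assumption about the usual transformers repository layout):
    #   python -m pytest tests/models/timm_backbone/test_modeling_timm_backbone.py -k "backbone"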
| 334
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
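# Example invocation (a sketch; the script filename is hypothetical):
#   python convert_levit_checkpoint.py --model_name levit-128S --pytorch_dump_folder_path ./levit-dump-folder --push_to_hub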
| 482
| 0
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # sort items by value/weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
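# Worked example (mirrors the doctest above): items worth (60, 100, 120) with
# weights (10, 20, 30) and capacity 50. The first two items fit whole (value
# 160) and 2/3 of the third adds 80, so the greedy optimum is 240.0:
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0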
| 316
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
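# Quick numeric check without plotting: a degree-1 curve from (1, 2) to (3, 5)
# evaluated at t=0.5 is simply the midpoint of the two control points.
# BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)  # -> (2.0, 3.5)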
| 316
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_lowercase : Any = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : Dict = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_lowercase : int = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference position, so transpose
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
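# Minimal usage sketch (mirrors Example 1 from the docstring above; requires
# the `datasets` and `sacrebleu` packages to be installed):
# chrf = datasets.load_metric("chrf")
# results = chrf.compute(predictions=["hello there"], references=[["hello there"]])
# print(results["score"])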
| 641
|
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four hardcoded demo graphs, selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected bridges (cut edges) of the graph."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
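# Example on the demo graphs above: graph 0 is two cycles joined by a chain,
# so its cut edges are (2, 3), (3, 4) and (2, 5); the exact order in the
# returned list depends on the DFS traversal:
# compute_bridges(get_demo_graph(0))  # -> [(3, 4), (2, 3), (2, 5)]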
| 641
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 115
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
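# Shape sketch (illustrative numbers; assumes working FlaxResnetBlock2D and
# FlaxTransformer2DModel implementations and NHWC-layout inputs):
# import jax
# block = FlaxDownBlock2D(in_channels=32, out_channels=64)
# params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 16, 16, 32)), jnp.ones((1, 128)))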
| 115
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`", "1.0.0", deprecation_message, standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 25
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
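# The same helpers can also be called directly outside pytest (network access
# required), e.g.:
# get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']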
| 446
| 0
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""")
    return f"""{filename_prefix_for_name(name)}-{split}"""


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return f"""{filepath}*"""


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
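# Example outputs (a quick sketch):
# camelcase_to_snakecase("SomeDatasetName")    # -> "some_dataset_name"
# filename_prefix_for_split("squad", "train")  # -> "squad-train"
# filenames_for_dataset_split("/data", "squad", "train", "arrow", shard_lengths=[100, 200])
# -> ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']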
| 269
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
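# Example invocation (a sketch; the script filename and checkpoint path are
# hypothetical):
#   python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14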
| 269
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 103
|
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
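# Hedged usage sketch (comments only, since this module is imported from inside
# the library and uses relative imports): given a Trainer-like object `trainer`
# (hypothetical here), backend selection and dispatch with the registry above
# would look roughly like:
#
#   backend_name = default_hp_search_backend()  # first installed backend wins
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()
#   best_run = backend.run(trainer, n_trials=20, direction="minimize")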
| 95
| 0
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 700
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCAmelCase : Tuple = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
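# Hedged example of the helper above (comments only, because this module uses
# relative imports and cannot run as a standalone script); "numpy" is one of the
# packages pinned in the deps table:
#
#   dep_version_check("numpy")                                 # raises if the pin is violated
#   dep_version_check("tokenizers", "pip install -U tokenizers")  # optional hint shown on failure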
| 474
| 0
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """simple docstring"""

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
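# Hedged usage sketch for the tokenizer above (comments only; the corpus is made
# up, and UnigramTrainer behavior on such tiny data is not guaranteed):
#
#   tok = SentencePieceUnigramTokenizer()
#   tok.train_from_iterator(iter(["hello world", "tokenizers build subword vocabularies"]),
#                           vocab_size=100, show_progress=False)
#   print(tok.encode("hello world").tokens)  # e.g. ['▁hello', '▁world', '</s>']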
| 244
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    """simple docstring"""

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    """simple docstring"""

    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    """simple docstring"""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
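# Hedged usage sketch for the patience-based early exit above (comments only;
# "bert-base-uncased" weights and the `batch` dict are assumed, not defined here):
#
#   model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased", num_labels=2)
#   model.bert.set_patience(3)   # stop once 3 consecutive layers agree on the label
#   model.eval()
#   logits = model(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])[0]
#   model.bert.log_stats()       # reports the average number of layers actually run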
| 244
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_aa_attentions=False, initializer_range=0.02, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
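# Hedged example (comments only, as this config module uses relative imports):
# instantiating the config above with one override; unset fields keep the
# defaults assigned in __init__.
#
#   config = MgpstrConfig(max_token_length=32)
#   print(config.hidden_size, config.max_token_length)  # 768 32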
| 50
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def lowerCamelCase_ ( UpperCamelCase__ : int ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase__ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase__, id=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if exitstatus == 5:
UpperCamelCase__ = 0
# Doctest custom flag to ignore output.
lowercase = doctest.register_optionflag("""IGNORE_RESULT""")
lowercase = doctest.OutputChecker
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
def A_ ( self : int , _a : Any , _a : Tuple , _a : Optional[int] ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _A , _A , _A )
lowercase = CustomOutputChecker
lowercase = HfDoctestModule
lowercase = HfDocTestParser
| 240
|
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple:
    """simple docstring"""
    # gradient of the normal at the point of incidence, then the double-angle
    # sine/cosine used to reflect the incoming gradient about that normal
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
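# Hedged worked example (comments only, so importing this file stays side-effect
# free): one reflection step from the problem's entry point. The beam enters at
# (0.0, 10.1) and first hits the ellipse at (1.4, -9.6), so the incoming gradient is:
#
#   incoming = (10.1 - (-9.6)) / (0.0 - 1.4)   # ≈ -14.07
#   x, y, m = next_point(1.4, -9.6, incoming)  # coordinates/gradient after one bounce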
| 74
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_equivalence_padding(self):
        """simple docstring"""
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors='pt')
        encoded_images = image_processing_2(image_inputs, return_tensors='pt')
        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained('hustvl/yolos-small')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = YolosImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 406
|
"""simple docstring"""
import random
def _partition(data, pivot):
    # three-way partition of data relative to the pivot
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    # index = len(items) // 2 when trying to find the median
    #   (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
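# Minimal runnable example: median selection with quick_select above.
# Expected value worked out by hand for this small list (sorted: [1, 3, 5, 7, 9]).
if __name__ == "__main__":
    data = [7, 1, 5, 3, 9]
    print(quick_select(data, len(data) // 2))  # 5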
| 406
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
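# Hedged note on the pattern above (comments only): _LazyModule defers the heavy
# torch-dependent imports until an attribute is first touched, so importing the
# package stays cheap. A hypothetical access that would trigger the lazy load:
#
#   from transformers.models.mctct import MCTCTConfig  # import path is an assumption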
| 377
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_6, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)['''last_hidden_state''']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            '''last_hidden_state'''
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        """simple docstring"""
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """simple docstring"""
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': BioGptModel,
            '''text-classification''': BioGptForSequenceClassification,
            '''text-generation''': BioGptForCausalLM,
            '''token-classification''': BioGptForTokenClassification,
            '''zero-shot''': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=3_7)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')

        tokenizer.padding_side = '''left'''

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]

        inputs = tokenizer(sentences, return_tensors='''pt''', padding=True)
        input_ids = inputs['''input_ids'''].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs['''attention_mask'''].to(torch_device), )

        inputs_non_padded = tokenizer(sentences[0], return_tensors='''pt''').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='''pt''').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_lm_head_model(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        input_ids = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
        output = model(input_ids)[0]

        vocab_size = 4_2_3_8_4

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer('''COVID-19 is''', return_tensors='''pt''').to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=1_0_0, max_length=1_0_2_4, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
"""Convert an image to its negative by inverting every pixel's color."""
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    rows, cols = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(rows):
        for j in range(cols):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
lowerCamelCase__ = imread('image_data/lena.jpg', 1)
# convert to its negative
lowerCamelCase__ = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
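# Note (a sketch, not part of the original script): for real-size images the
# per-pixel Python loop in convert_to_negative is slow; NumPy broadcasting gives
# the same result in one vectorized step: `img = 255 - img`.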
"""Deprecated alias kept for backwards compatibility with CLIPFeatureExtractor."""
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __snake_case ( _lowercase):
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_lowerCamelCase : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_lowerCamelCase : List[Any] = bertabert.config.encoder.vocab_size
_lowerCamelCase : Optional[int] = tokenizer.sep_token_id
_lowerCamelCase : Any = tokenizer.cls_token_id
_lowerCamelCase : Tuple = 1_2_8
_lowerCamelCase : str = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_lowerCamelCase : List[Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_lowerCamelCase : Union[str, Any] = train_dataset.select(range(3_2 ) )
_lowerCamelCase : Tuple = val_dataset.select(range(1_6 ) )
_lowerCamelCase : Optional[Any] = 4
def _map_to_encoder_decoder_inputs(__lowerCAmelCase : Union[str, Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowerCamelCase : Union[str, Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__lowerCAmelCase , max_length=5_1_2 )
_lowerCamelCase : Optional[Any] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__lowerCAmelCase , max_length=1_2_8 )
_lowerCamelCase : Union[str, Any] = inputs.input_ids
_lowerCamelCase : Dict = inputs.attention_mask
_lowerCamelCase : Union[str, Any] = outputs.input_ids
_lowerCamelCase : Optional[int] = outputs.input_ids.copy()
_lowerCamelCase : List[str] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_lowerCamelCase : List[Any] = outputs.attention_mask
assert all(len(__lowerCAmelCase ) == 5_1_2 for x in inputs.input_ids )
assert all(len(__lowerCAmelCase ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(__lowerCAmelCase : List[str] ):
_lowerCamelCase : Union[str, Any] = pred.label_ids
_lowerCamelCase : List[Any] = pred.predictions
# all unnecessary tokens are removed
_lowerCamelCase : Optional[int] = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_lowerCamelCase : Tuple = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
_lowerCamelCase : List[str] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCAmelCase ) )] ) / len(__lowerCAmelCase )
return {"accuracy": accuracy}
# map train dataset
_lowerCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_lowerCamelCase : Union[str, Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCAmelCase , batch_size=__lowerCAmelCase , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_lowerCamelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCamelCase : Dict = SeqaSeqTrainingArguments(
output_dir=__lowerCAmelCase , per_device_train_batch_size=__lowerCAmelCase , per_device_eval_batch_size=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , evaluation_strategy='''steps''' , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowerCamelCase : Optional[Any] = SeqaSeqTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
# start training
trainer.train()
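        # Note: with only 32 training examples and a tiny bert2bert model this is
        # a smoke test; it verifies that Seq2SeqTrainer can run train/eval steps
        # end to end, not that the resulting accuracy is meaningful.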
import unittest

from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel


@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
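# Note: the decoder attention mask built above always unmasks the first
# (decoder-start) position, then masks padding in the remaining shifted inputs.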
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a = nn.functional.normalize(_lowerCAmelCase )
_a = nn.functional.normalize(_lowerCAmelCase )
return torch.mm(_lowerCAmelCase, normalized_text_embeds.t() )
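# Despite its name, `cosine_distance` returns the cosine *similarity* matrix
# between every image embedding and every concept embedding (both are
# L2-normalized before the matrix product).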
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
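    # Note: `forward` is the per-image NumPy reference path; `forward_onnx`
    # computes the same thresholding entirely in tensor ops so the module can be
    # traced/exported (e.g. to ONNX).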
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
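# Usage sketch (assumes the `HashTable` base class from .hash_table exposes
# `insert_data`, a `values` bucket list, and a `charge_factor`):
#     ht = HashTableWithLinkedList(3, charge_factor=2)
#     ht.insert_data(17)  # a colliding key chains its value in the bucket's deque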
"""CSV dataset builder: streams CSV files into Arrow tables via pandas."""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
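    # Usage sketch: `CsvConfig(sep=";").pd_read_csv_kwargs` yields only the kwargs
    # the installed pandas version accepts, so the dict can be splatted straight
    # into `pd.read_csv`.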
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
"""Vegetation index calculation from multispectral (RGB / red-edge / NIR) bands."""
# Imports
import numpy as np


class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch the requested vegetation index by name."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
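    # Usage sketch: `IndexCalculation(red=r, green=g, blue=b, nir=n).calculation("NDVI")`
    # dispatches to the matching index method defined below.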
    def arv12(self):
        """Atmospherically Resistant Vegetation Index 2."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        """Canopy Chlorophyll Content Index."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        """Chlorophyll Vegetation Index."""
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        """Green Leaf Index."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        """Normalized Difference Vegetation Index."""
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
"""simple docstring"""
A : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : int = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : List[str] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
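# These constants are consumed by the documentation/notebook conversion tooling:
# INSTALL_CONTENT becomes the first cell of each generated notebook, and
# black_avoid_patterns keeps the templated placeholders from tripping up black.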
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""simple docstring"""
from __future__ import annotations
__lowerCAmelCase : int = "Muhammad Umer Farooq"
__lowerCAmelCase : str = "MIT"
__lowerCAmelCase : str = "1.0.0"
__lowerCAmelCase : int = "Muhammad Umer Farooq"
__lowerCAmelCase : Tuple = "contact@muhammadumerfarooq.me"
__lowerCAmelCase : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the main domain name, e.g. "example.com" for "sub.example.com"."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (sub domain) of the given URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the anchor tags of ``url`` and return all e-mail addresses found."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F"{len(emails)} emails found:")
print("\n".join(sorted(emails)))
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """Convert the original Bort checkpoint (GluonNLP/MXNet) to a PyTorch checkpoint."""
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCamelCase__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
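
# Example invocation (script name and paths are placeholders for illustration):
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch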
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: items may be taken partially.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
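
# Worked example for the fractional case (numbers chosen for illustration):
# frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) sorts items by
# value/weight ratio (6.0, 5.0, 4.0), takes the first two whole
# (60 + 100 = 160) and 20/30 of the last one (120 * 20 / 30 = 80),
# for a total of 240.0.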
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(35)
    '0b100011'
    >>> decimal_to_binary(-2)
    '-0b10'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
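
# A few expected conversions, following the doctests above:
#   decimal_to_binary(8)    -> "0b1000"
#   decimal_to_binary(-10)  -> "-0b1010"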
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
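
# Minimal usage sketch (values are illustrative and follow the usual
# transformers config/ONNX-config semantics):
#   config = XmodConfig(vocab_size=30522, default_language="en_XX")
#   onnx_config = XmodOnnxConfig(config, task="sequence-classification")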
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
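
# Usage sketch (illustrative): with the lazy structure above, the heavy
# torch-backed module is only imported on first attribute access, e.g.:
#   from transformers import CanineConfig, CanineModel
#   model = CanineModel(CanineConfig())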
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
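
# Example invocation (output path is a placeholder):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection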
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row, Pascal's-triangle style, in O(r) space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
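
# Sanity check: C(10, 5) = 10! / (5! * 5!) = 252, so the call above prints 252.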
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
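
# Minimal usage sketch (model id and image are illustrative):
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")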
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
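
# Example invocation (paths are placeholders; pretraining_tp must match the
# tensor-parallel degree used during Megatron-LM training):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path ./bloom-pytorch \
#       --shard_model --pretraining_tp 4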
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = full_name.split("""conv_layers.""" )[-1]
lowerCAmelCase__ : Optional[int] = name.split(""".""" )
lowerCAmelCase__ : Any = int(items[0] )
lowerCAmelCase__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase )
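# A small standalone check (the key below is hypothetical, but mirrors the
# fairseq naming scheme) of the parsing above: the suffix after "conv_layers."
# carries the layer index and a type id, where type id 0 addresses the
# convolution itself and type id 2 the layer norm.
def _illustrate_conv_name_parsing():
    full_name = "feature_extractor.conv_layers.0.0.weight"
    name = full_name.split("conv_layers.")[-1]  # -> "0.0.weight"
    items = name.split(".")
    layer_id, type_id = int(items[0]), int(items[1])
    assert (layer_id, type_id) == (0, 0)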
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=True ):
"""simple docstring"""
if config_path is not None:
lowerCAmelCase__ : int = WavaVecaConformerConfig.from_pretrained(UpperCamelCase , hidden_act="""swish""" )
else:
lowerCAmelCase__ : int = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase__ : str = """rotary"""
if is_finetuned:
if dict_path:
lowerCAmelCase__ : Union[str, Any] = Dictionary.load(UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ : Optional[int] = target_dict.pad_index
lowerCAmelCase__ : Tuple = target_dict.bos_index
lowerCAmelCase__ : Optional[Any] = target_dict.eos_index
lowerCAmelCase__ : Optional[int] = len(target_dict.symbols )
lowerCAmelCase__ : int = os.path.join(UpperCamelCase , """vocab.json""" )
if not os.path.isdir(UpperCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase ) )
return
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
lowerCAmelCase__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Optional[int] = 1
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Dict = WavaVecaCTCTokenizer(
UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=UpperCamelCase , )
lowerCAmelCase__ : List[str] = True if config.feat_extract_norm == """layer""" else False
lowerCAmelCase__ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCamelCase , return_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(feature_extractor=UpperCamelCase , tokenizer=UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = WavaVecaConformerForCTC(UpperCamelCase )
else:
lowerCAmelCase__ : str = WavaVecaConformerForPreTraining(UpperCamelCase )
if is_finetuned:
lowerCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
lowerCAmelCase__ : int = argparse.Namespace(task="""audio_pretraining""" )
lowerCAmelCase__ : Optional[int] = fairseq.tasks.setup_task(UpperCamelCase )
lowerCAmelCase__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase )
lowerCAmelCase__ : int = model[0].eval()
recursively_load_weights(UpperCamelCase , UpperCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
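# Illustrative invocation of this conversion script (all paths below are
# placeholders, not real checkpoints):
#
#   python <path_to_this_script> \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt
#
# Passing --not_finetuned (and omitting --dict_path) converts a pretraining-only
# checkpoint rather than a fine-tuned CTC model.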
| 714
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
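# Compact, standalone sketch of the lazy-import pattern used above. The class
# below is an illustrative stand-in for transformers' _LazyModule, not its real
# implementation: attribute lookup triggers the underlying import on first use,
# so importing the package itself stays cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, attrs in self._import_structure.items():
            if attr in attrs:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)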
| 160
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowerCAmelCase ( unittest.TestCase):
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=18 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=400 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size if size is not None else {'''height''': 18, '''width''': 20}
__snake_case = do_thumbnail
__snake_case = do_align_axis
__snake_case = do_pad
__snake_case = do_normalize
__snake_case = image_mean
__snake_case = image_std
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = DonutImageProcessingTester(self )
@property
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_pad''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean''' ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std''' ) )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
pass
@is_flaky()
def lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
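# The size handling asserted by the tests above, restated as standalone dict
# logic (illustrative only; the processor performs this conversion internally):
# an int becomes a square size, and a legacy (width, height) tuple is flipped
# into the {"height", "width"} form.
def _normalize_size(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):
        width, height = size
        return {"height": height, "width": width}
    return size


assert _normalize_size(42) == {"height": 42, "width": 42}
assert _normalize_size((42, 84)) == {"height": 84, "width": 42}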
| 24
|
from math import factorial
UpperCAmelCase : Tuple = {str(d): factorial(d) for d in range(10)}
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(SCREAMING_SNAKE_CASE ) )
def _A ( ):
"""simple docstring"""
a__ : Any =7 * factorial(9 ) + 1
return sum(i for i in range(3 , SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(SCREAMING_SNAKE_CASE ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 563
| 0
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
UpperCamelCase__ = False
UpperCamelCase__ = False
def a__ ( lowerCAmelCase__ ) -> List[str]:
return TrainCommand(lowerCAmelCase__ )
class lowerCamelCase_ ( __a ):
@staticmethod
def lowercase_ ( _A : ArgumentParser ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=_A , required=_A , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_A , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=_A , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=_A , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=_A , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=_A , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_A , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=_A , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=_A , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=_A , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=_A , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=_A , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=_A , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=_A )
def __init__( self : Dict , _A : Namespace ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = logging.get_logger('''transformers-cli/training''' )
UpperCAmelCase__ : Optional[int] = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_A )
UpperCAmelCase__ : Tuple = args.output
UpperCAmelCase__ : List[Any] = args.column_label
UpperCAmelCase__ : Tuple = args.column_text
UpperCAmelCase__ : Tuple = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
UpperCAmelCase__ : Optional[Any] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
UpperCAmelCase__ : Optional[Any] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase__ : Optional[Any] = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
UpperCAmelCase__ : Tuple = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase__ : Tuple = args.validation_split
UpperCAmelCase__ : Optional[int] = args.train_batch_size
UpperCAmelCase__ : Optional[Any] = args.valid_batch_size
UpperCAmelCase__ : int = args.learning_rate
UpperCAmelCase__ : Dict = args.adam_epsilon
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
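# Illustrative shell usage of the subcommand registered above (file paths are
# placeholders; the entry point is the transformers-cli console script):
#
#   transformers-cli train \
#       --train_data ./train.csv \
#       --task text_classification \
#       --model bert-base-uncased \
#       --output ./trained_model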
| 718
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = 'wavlm'
def __init__( self : Optional[Any] , _A : Union[str, Any]=32 , _A : Any=768 , _A : Dict=12 , _A : Optional[Any]=12 , _A : Optional[Any]=3_072 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : Any=0.1 , _A : Optional[Any]=0.1 , _A : Dict=0.0 , _A : Tuple=0.1 , _A : str=0.1 , _A : Union[str, Any]=0.0_2 , _A : Optional[Any]=1e-5 , _A : str="group" , _A : int="gelu" , _A : Tuple=(512, 512, 512, 512, 512, 512, 512) , _A : Tuple=(5, 2, 2, 2, 2, 2, 2) , _A : int=(10, 3, 3, 3, 3, 2, 2) , _A : Optional[int]=False , _A : str=128 , _A : str=16 , _A : Optional[int]=320 , _A : Any=800 , _A : Any=False , _A : Tuple=True , _A : Optional[Any]=0.0_5 , _A : str=10 , _A : int=2 , _A : Optional[int]=0.0 , _A : int=10 , _A : List[str]=320 , _A : Tuple=2 , _A : Dict=0.1 , _A : Union[str, Any]=100 , _A : Tuple=256 , _A : Dict=256 , _A : List[str]=0.1 , _A : str="mean" , _A : Optional[int]=False , _A : Optional[Any]=False , _A : Any=256 , _A : Union[str, Any]=(512, 512, 512, 512, 1_500) , _A : str=(5, 3, 3, 1, 1) , _A : Union[str, Any]=(1, 2, 3, 1, 1) , _A : str=512 , _A : Optional[int]=80 , _A : List[Any]=0 , _A : Optional[int]=1 , _A : List[str]=2 , _A : Optional[int]=False , _A : str=3 , _A : Dict=2 , _A : List[str]=3 , _A : Optional[Any]=None , **_A : Tuple , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
UpperCAmelCase__ : Dict = hidden_size
UpperCAmelCase__ : Optional[Any] = feat_extract_norm
UpperCAmelCase__ : str = feat_extract_activation
UpperCAmelCase__ : Tuple = list(_A )
UpperCAmelCase__ : Union[str, Any] = list(_A )
UpperCAmelCase__ : Optional[Any] = list(_A )
UpperCAmelCase__ : Optional[Any] = conv_bias
UpperCAmelCase__ : List[Any] = num_buckets
UpperCAmelCase__ : Optional[Any] = max_bucket_distance
UpperCAmelCase__ : int = num_conv_pos_embeddings
UpperCAmelCase__ : Optional[Any] = num_conv_pos_embedding_groups
UpperCAmelCase__ : Any = len(self.conv_dim )
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = hidden_dropout
UpperCAmelCase__ : Tuple = attention_dropout
UpperCAmelCase__ : str = activation_dropout
UpperCAmelCase__ : str = feat_proj_dropout
UpperCAmelCase__ : Tuple = final_dropout
UpperCAmelCase__ : List[str] = layerdrop
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Any = num_ctc_classes
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Optional[int] = do_stable_layer_norm
UpperCAmelCase__ : Union[str, Any] = use_weighted_layer_sum
UpperCAmelCase__ : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : List[str] = apply_spec_augment
UpperCAmelCase__ : str = mask_time_prob
UpperCAmelCase__ : int = mask_time_length
UpperCAmelCase__ : Optional[int] = mask_time_min_masks
UpperCAmelCase__ : int = mask_feature_prob
UpperCAmelCase__ : Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase__ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase__ : List[str] = num_codevector_groups
UpperCAmelCase__ : Optional[int] = contrastive_logits_temperature
UpperCAmelCase__ : Optional[int] = num_negatives
UpperCAmelCase__ : List[Any] = codevector_dim
UpperCAmelCase__ : Union[str, Any] = proj_codevector_dim
UpperCAmelCase__ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase__ : str = ctc_loss_reduction
UpperCAmelCase__ : Optional[Any] = ctc_zero_infinity
# adapter
UpperCAmelCase__ : Union[str, Any] = add_adapter
UpperCAmelCase__ : List[Any] = adapter_kernel_size
UpperCAmelCase__ : Union[str, Any] = adapter_stride
UpperCAmelCase__ : Tuple = num_adapter_layers
UpperCAmelCase__ : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase__ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ : Optional[int] = list(_A )
UpperCAmelCase__ : str = list(_A )
UpperCAmelCase__ : Any = list(_A )
UpperCAmelCase__ : List[Any] = xvector_output_dim
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
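# Standalone check of the property above: multiplying the default convolutional
# strides (5, 2, 2, 2, 2, 2, 2) gives the overall waveform-to-frame downsampling
# factor of the feature extractor.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320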
| 312
| 0
|
'''simple docstring'''
from __future__ import annotations
def snake_case__ ( _A: int = 4 ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase = abs(_A ) or 4
return [[1 + x + y * row_size for x in range(_A )] for y in range(_A )]
def snake_case__ ( _A: str ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(_A ) )
# OR.. transpose(reverse_column(matrix))
def snake_case__ ( _A: Any ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(_A ) )
# OR.. reverse_column(reverse_row(matrix))
def snake_case__ ( _A: Any ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(_A ) )
# OR.. transpose(reverse_row(matrix))
def snake_case__ ( _A: Dict ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase = [list(_A ) for x in zip(*_A )]
return matrix
def snake_case__ ( _A: str ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase = matrix[::-1]
return matrix
def snake_case__ ( _A: Optional[Any] ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def snake_case__ ( _A: Dict ) -> None:
'''simple docstring'''
for i in matrix:
print(*_A )
if __name__ == "__main__":
__lowercase = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
__lowercase = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
__lowercase = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
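# Standalone sanity check of the identities used above: a 90-degree
# counterclockwise rotation is a transpose followed by a row reversal, and two
# such rotations compose to a 180-degree rotation.
def _rot90_ccw(m):
    return [list(row) for row in zip(*m)][::-1]


_m = [[1, 2], [3, 4]]
assert _rot90_ccw(_m) == [[2, 4], [1, 3]]
assert _rot90_ccw(_rot90_ccw(_m)) == [[4, 3], [2, 1]]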
| 370
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase (__snake_case ):
def __init__( self :Any , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] ) ->Dict:
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase : Optional[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ )
@torch.no_grad()
def __call__( self :Optional[int] , __magic_name__ :int = 1 , __magic_name__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ :float = 0.0 , __magic_name__ :int = 50 , __magic_name__ :Optional[bool] = None , __magic_name__ :Optional[str] = "pil" , __magic_name__ :bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __magic_name__ ):
lowercase : int = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase : Tuple = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__magic_name__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowercase : Dict = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__magic_name__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase : Union[str, Any] = self.unet(__magic_name__ , __magic_name__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase : int = self.scheduler.step(
__magic_name__ , __magic_name__ , __magic_name__ , eta=__magic_name__ , use_clipped_model_output=__magic_name__ , generator=__magic_name__ ).prev_sample
lowercase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowercase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : Any = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__magic_name__ )
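# Illustrative usage of this pipeline (the checkpoint id is only an example of a
# compatible unconditional diffusion model; any UNet + scheduler pair that
# DDIMPipeline can load would work):
#
#   from diffusers import DDIMPipeline
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")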
| 264
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Dict = "open-llama"
def __init__(self ,_lowerCamelCase=100000 ,_lowerCamelCase=4096 ,_lowerCamelCase=11008 ,_lowerCamelCase=32 ,_lowerCamelCase=32 ,_lowerCamelCase="silu" ,_lowerCamelCase=2048 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=1E-6 ,_lowerCamelCase=True ,_lowerCamelCase=0 ,_lowerCamelCase=1 ,_lowerCamelCase=2 ,_lowerCamelCase=False ,_lowerCamelCase=True ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=None ,**_lowerCamelCase ,) -> int:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = hidden_size
__lowercase = intermediate_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = rms_norm_eps
__lowercase = use_cache
__lowercase = kwargs.pop(
'''use_memorry_efficient_attention''' ,_lowerCamelCase )
__lowercase = hidden_dropout_prob
__lowercase = attention_dropout_prob
__lowercase = use_stable_embedding
__lowercase = shared_input_output_embedding
__lowercase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,tie_word_embeddings=_lowerCamelCase ,**_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
__lowercase = self.rope_scaling.get('''type''' ,_lowerCamelCase )
__lowercase = self.rope_scaling.get('''factor''' ,_lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCamelCase ,_lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 717
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
_SCREAMING_SNAKE_CASE = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation='''relu'''))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
_SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
_SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
)
_SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
_SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
_SCREAMING_SNAKE_CASE = classifier.predict(test_image)
# training_set.class_indices
    # the sigmoid output is a probability in (0, 1), so threshold it rather than
    # comparing it to exact 0/1 values
    if result[0][0] <= 0.5:
        _SCREAMING_SNAKE_CASE = '''Normal'''
    else:
        _SCREAMING_SNAKE_CASE = '''Abnormality detected'''
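    # Standalone check (illustrative) of the feature-map sizes implied by the
    # architecture above: a 3x3 'valid' convolution shrinks each spatial side by
    # 2 and 2x2 max pooling halves it, so 64 -> 62 -> 31 -> 29 -> 14 before the
    # Flatten layer, i.e. 14 * 14 * 32 = 6272 units.
    _side = (64 - 3 + 1) // 2  # first conv + pool: 64 -> 62 -> 31
    _side = (_side - 3 + 1) // 2  # second conv + pool: 31 -> 29 -> 14
    assert _side == 14 and _side * _side * 32 == 6272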
| 56
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : str ,a__ : int = 7_68 ,):
super().__init__()
a__ = nn.Parameter(torch.zeros(1 ,a__ ) )
a__ = nn.Parameter(torch.ones(1 ,a__ ) )
def lowerCAmelCase_ ( self : List[Any] ,a__ : Optional[Union[str, torch.device]] = None ,a__ : Optional[torch.dtype] = None ,):
a__ = nn.Parameter(self.mean.to(a__ ).to(a__ ) )
a__ = nn.Parameter(self.std.to(a__ ).to(a__ ) )
return self
def lowerCAmelCase_ ( self : List[str] ,a__ : Optional[int] ):
a__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase_ ( self : List[str] ,a__ : int ):
a__ = (embeds * self.std) + self.mean
return embeds
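# Standalone round-trip check of the (de)standardization above, using plain
# tensors instead of the registered parameters (illustrative only):
import torch

_mean, _std = torch.zeros(1, 4), torch.ones(1, 4)
_embeds = torch.randn(2, 4)
_scaled = (_embeds - _mean) * 1.0 / _std
assert torch.allclose(_scaled * _std + _mean, _embeds)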
| 331
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self : str ,a__ : int ,a__ : int ,a__ : int ,a__ : float ,a__ : int ,a__ : int ,a__ : int ,a__ : int ,a__ : str ,a__ : bool = False ,):
super().__init__()
a__ = nn.Embedding(a__ ,a__ )
a__ = nn.Embedding(a__ ,a__ )
a__ = False
a__ = nn.Dropout(p=a__ )
a__ = TaConfig(
vocab_size=a__ ,d_model=a__ ,num_heads=a__ ,d_kv=a__ ,d_ff=a__ ,dropout_rate=a__ ,feed_forward_proj=a__ ,is_decoder=a__ ,is_encoder_decoder=a__ ,)
a__ = nn.ModuleList()
for lyr_num in range(a__ ):
a__ = TaBlock(a__ )
self.encoders.append(a__ )
a__ = TaLayerNorm(a__ )
a__ = nn.Dropout(p=a__ )
def lowerCAmelCase_ ( self : Optional[Any] ,a__ : Tuple ,a__ : Optional[int] ):
a__ = self.token_embedder(a__ )
a__ = encoder_input_tokens.shape[1]
a__ = torch.arange(a__ ,device=encoder_input_tokens.device )
x += self.position_encoding(a__ )
a__ = self.dropout_pre(a__ )
# inverted the attention mask
a__ = encoder_input_tokens.size()
a__ = self.get_extended_attention_mask(a__ ,a__ )
for lyr in self.encoders:
a__ = lyr(a__ ,a__ )[0]
a__ = self.layer_norm(a__ )
return self.dropout_post(a__ ), encoder_inputs_mask
| 331
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def __lowerCAmelCase ( __lowerCAmelCase : str ) -> list[str]:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowerCAmelCase ) )]
def __lowerCAmelCase ( __lowerCAmelCase : str ) -> BWTTransformDict:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_UpperCamelCase : Any = all_rotations(__lowerCAmelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_UpperCamelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowerCAmelCase ),
}
return response
def __lowerCAmelCase ( __lowerCAmelCase : str , __lowerCAmelCase : int ) -> str:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_UpperCamelCase : Optional[Any] = int(__lowerCAmelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowerCAmelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_UpperCamelCase : Any = [""] * len(__lowerCAmelCase )
for _ in range(len(__lowerCAmelCase ) ):
for i in range(len(__lowerCAmelCase ) ):
_UpperCamelCase : str = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = """Provide a string that I will generate its BWT transform: """
_SCREAMING_SNAKE_CASE = input(entry_msg).strip()
_SCREAMING_SNAKE_CASE = bwt_transform(s)
print(
f'Burrows Wheeler transform for string \'{s}\' results '
f'in \'{result["bwt_string"]}\''
)
_SCREAMING_SNAKE_CASE = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
f'we get original string \'{original_string}\''
)
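# Worked example of the definitions above, computed by hand: the six rotations of
# "banana" sort to [abanan, anaban, ananab, banana, nabana, nanaba]; the last
# character of each gives "nnbaaa", and the original string sits at index 3 of
# the sorted list.
#
#   result = bwt_transform("banana")
#   assert result["bwt_string"] == "nnbaaa"
#   assert result["idx_original_string"] == 3
#   assert reverse_bwt("nnbaaa", 3) == "banana"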
| 239
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _SCREAMING_SNAKE_CASE ( enum.Enum ):
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCAmelCase = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__(self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCamelCase : List[str] = None
if self.model.config.prefix is not None:
_UpperCamelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCamelCase : Union[str, Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = self._sanitize_parameters(prefix=lowerCAmelCase__ , **self._forward_params )
_UpperCamelCase : str = {**self._preprocess_params, **preprocess_params}
_UpperCamelCase : List[str] = {**self._forward_params, **forward_params}
def lowercase_ (self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = {}
if prefix is not None:
_UpperCamelCase : Union[str, Any] = prefix
if prefix:
_UpperCamelCase : Optional[int] = self.tokenizer(
lowerCAmelCase__ , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
_UpperCamelCase : List[str] = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
" [None, 'hole']" )
_UpperCamelCase : Tuple = handle_long_generation
preprocess_params.update(lowerCAmelCase__ )
_UpperCamelCase : List[str] = generate_kwargs
_UpperCamelCase : Optional[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
_UpperCamelCase : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
_UpperCamelCase : List[str] = ReturnType.TENSORS
if return_type is not None:
_UpperCamelCase : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase : Any = self.tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
_UpperCamelCase : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase_ (self , *lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __call__(self , lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__=None , **lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=self.framework )
_UpperCamelCase : List[str] = prompt_text
if handle_long_generation == "hole":
_UpperCamelCase : Union[str, Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCamelCase : Union[str, Any] = generate_kwargs["max_new_tokens"]
else:
_UpperCamelCase : str = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCamelCase : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
_UpperCamelCase : Tuple = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCamelCase : Tuple = inputs["attention_mask"][:, -keep_length:]
return inputs
def lowercase_ (self , lowerCAmelCase__ , **lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = model_inputs["input_ids"]
_UpperCamelCase : List[str] = model_inputs.get("attention_mask" , lowerCAmelCase__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Union[str, Any] = None
_UpperCamelCase : Dict = 1
else:
_UpperCamelCase : Any = input_ids.shape[0]
_UpperCamelCase : Tuple = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCamelCase : Tuple = generate_kwargs.pop("prefix_length" , 0 )
if prefix_length > 0:
_UpperCamelCase : Tuple = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCamelCase : List[str] = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCamelCase : List[str] = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCamelCase : Optional[Any] = self.model.generate(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase : List[str] = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCamelCase : str = generated_sequence.reshape(lowerCAmelCase__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCamelCase : Union[str, Any] = tf.reshape(lowerCAmelCase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__=ReturnType.FULL_TEXT , lowerCAmelCase__=True ):
'''simple docstring'''
_UpperCamelCase : str = model_outputs["generated_sequence"][0]
_UpperCamelCase : Tuple = model_outputs["input_ids"]
_UpperCamelCase : Dict = model_outputs["prompt_text"]
_UpperCamelCase : str = generated_sequence.numpy().tolist()
_UpperCamelCase : Optional[int] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_UpperCamelCase : List[Any] = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_UpperCamelCase : Dict = self.tokenizer.decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_UpperCamelCase : int = 0
else:
_UpperCamelCase : Optional[int] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , ) )
if return_type == ReturnType.FULL_TEXT:
_UpperCamelCase : int = prompt_text + text[prompt_length:]
else:
_UpperCamelCase : Any = text[prompt_length:]
_UpperCamelCase : Dict = {"generated_text": all_text}
records.append(lowerCAmelCase__ )
return records
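# Illustrative end-to-end usage of the pipeline above (the model id is just a
# common example checkpoint):
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Once upon a time", max_new_tokens=20, return_full_text=False)
#   print(out[0]["generated_text"])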
| 239
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _a ( _UpperCAmelCase ):
'''simple docstring'''
A :Dict = 'sew'
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.0_5 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
a__ : Union[str, Any] = hidden_size
a__ : int = feat_extract_norm
a__ : Optional[int] = feat_extract_activation
a__ : Any = list(SCREAMING_SNAKE_CASE_ )
a__ : Union[str, Any] = list(SCREAMING_SNAKE_CASE_ )
a__ : str = list(SCREAMING_SNAKE_CASE_ )
a__ : Tuple = conv_bias
a__ : Dict = num_conv_pos_embeddings
a__ : Optional[Any] = num_conv_pos_embedding_groups
a__ : Dict = len(self.conv_dim )
a__ : Optional[Any] = num_hidden_layers
a__ : Tuple = intermediate_size
a__ : List[Any] = squeeze_factor
a__ : List[str] = hidden_act
a__ : Dict = num_attention_heads
a__ : Dict = hidden_dropout
a__ : Tuple = attention_dropout
a__ : Dict = activation_dropout
a__ : Optional[int] = feat_proj_dropout
a__ : Tuple = final_dropout
a__ : str = layerdrop
a__ : int = layer_norm_eps
a__ : int = initializer_range
a__ : Optional[int] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : Optional[Any] = apply_spec_augment
a__ : Tuple = mask_time_prob
a__ : Any = mask_time_length
a__ : int = mask_time_min_masks
a__ : int = mask_feature_prob
a__ : Dict = mask_feature_length
a__ : List[str] = mask_feature_min_masks
# ctc loss
a__ : Any = ctc_loss_reduction
a__ : List[Any] = ctc_zero_infinity
# sequence classification
a__ : List[Any] = use_weighted_layer_sum
a__ : Union[str, Any] = classifier_proj_size
@property
def _A ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 191
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A__ : List[str] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13
| 0
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_snake_case : Optional[int] = logging.get_logger(__name__)
# General docstring
_snake_case : Any = """ResNetConfig"""
# Base docstring
_snake_case : List[str] = """microsoft/resnet-50"""
_snake_case : Tuple = [1, 2_048, 7, 7]
# Image classification docstring
_snake_case : Any = """microsoft/resnet-50"""
_snake_case : List[Any] = """tiger cat"""
_snake_case : Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowerCAmelCase ( nn.Module ):
def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = 3 , UpperCamelCase = 1 , UpperCamelCase = "relu" ):
super().__init__()
_SCREAMING_SNAKE_CASE = nn.Convad(
UpperCamelCase , UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=kernel_size // 2 , bias=UpperCamelCase )
_SCREAMING_SNAKE_CASE = nn.BatchNormad(UpperCamelCase )
_SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase ( self , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = self.convolution(UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.normalization(UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.activation(UpperCamelCase )
return hidden_state
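# Standalone note (illustrative) on the padding choice above: with stride 1,
# padding of kernel_size // 2 preserves the spatial size for odd kernels.
import torch
import torch.nn as nn

_x = torch.randn(1, 3, 8, 8)
assert nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=3 // 2)(_x).shape[-2:] == (8, 8)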
class lowerCAmelCase ( nn.Module ):
def __init__( self , UpperCamelCase ):
super().__init__()
_SCREAMING_SNAKE_CASE = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_SCREAMING_SNAKE_CASE = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_SCREAMING_SNAKE_CASE = config.num_channels
def lowercase ( self , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
_SCREAMING_SNAKE_CASE = self.embedder(UpperCamelCase )
_SCREAMING_SNAKE_CASE = self.pooler(UpperCamelCase )
return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
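# Classic residual layer: two 3x3 convolutions whose output is summed with a (possibly projected) shortcut.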
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
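# Bottleneck residual layer: a 1x1 reduction, a 3x3 convolution, then a 1x1 expansion back to `out_channels`.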
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
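# A stage stacks `depth` residual layers; only the first layer may change resolution/channels.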
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
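# The encoder chains the stages and optionally records the hidden state emitted after each one.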
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
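# Illustrative usage (not part of the original module): a minimal sketch that assumes the public
# AutoImageProcessor API and network access to the "microsoft/resnet-50" checkpoint.
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])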
| 493
|
"""Implementation of gradient descent for minimizing the cost of a linear hypothesis function."""
import numpy
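# Batch gradient descent on a linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... + theta_n*x_n:
# each iteration updates every parameter as theta_i := theta_i - LEARNING_RATE * dJ/dtheta_i,
# where the cost derivative is averaged over the whole training set.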
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output of one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Evaluate h(x) = theta_0 + theta_1*x_1 + ... for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output (label) of the example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of errors (bias term when index == -1), or of errors weighted by the index-th feature."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the cost with respect to parameter `index + 1` (index == -1 is the bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 493
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
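# Naive polynomial multiplication is O(n^2); evaluating both polynomials at the complex roots of
# unity (the DFT), multiplying pointwise and interpolating back (the inverse DFT) lowers this to
# O(n log n).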
class FFT:
    """Multiply the polynomials A(x) and B(x) via the radix-2 Cooley-Tukey FFT."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Halve the number of columns at each butterfly stage
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverse_c[i][j] + inverse_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverse_c[i][j] - inverse_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove trailing 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c
    def __str__(self):
        """Show A, B and the product A*B as readable polynomials."""
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
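# XLMModelTester builds a tiny, randomly initialized XLM configuration and matching dummy inputs so
# that every task head can be exercised quickly; the shape assertions live in the
# `create_and_check_*` methods below.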
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 89
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
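# e.g. extract_label("images/great_pyrenees_123.jpg") == "great_pyrenees" (Oxford-IIIT Pet file naming)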
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer (only the classifier is trainable, hence the reduced base learning rate)
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 89
| 1
|
import math
from collections.abc import Callable
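# Secant method: approximate f' by the slope through the two most recent iterates and apply the
# Newton-style update x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)).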
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` starting from the two initial guesses x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    """Example function whose root is sought: f(x) = x^3 - 2x - 5."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 315
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
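# Whole word masking masks every sub-token of a selected word at once instead of masking sub-tokens
# independently; the optional "ref" files accepted below mark word boundaries for languages such as
# Chinese, where the tokenizer cannot infer them.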
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    """Attach a `chinese_ref` column (one JSON list per text line) to `dataset`."""
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs), "the ref file must contain one line per dataset example"

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
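# Illustrative only -- the file contents below are made-up assumptions, not taken
# from this script: each line of the ref file is a JSON list marking the sub-word
# positions that continue a word, e.g.
#
#   train.txt       一行中文文本
#   train_ref.txt   [1, 3, 4]
#
# The resulting `chinese_ref` column lets `DataCollatorForWholeWordMask` mask all
# sub-tokens of a word together instead of masking them independently.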
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log a small summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, we need to keep the Trainer from dropping the extra column
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
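# Example invocation, assuming this file is saved as run_mlm_wwm.py (the paths and
# hyper-parameters below are illustrative assumptions, not prescriptive):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file ./train.txt \
#       --train_ref_file ./train_ref.txt \
#       --mlm_probability 0.15 \
#       --do_train \
#       --output_dir ./output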
| 315
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
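# Illustrative only: `get_dataloaders` expects a live `Accelerator`, e.g.
#
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
#
# The entry point below builds the Accelerator inside `training_function` instead.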
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
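# For reference, a minimal sketch of the same eval loop using the newer
# `Accelerator.gather_for_metrics`, which performs the duplicate-sample truncation
# above automatically (this assumes a recent accelerate release that ships the API):
#
#   for batch in eval_dataloader:
#       with torch.no_grad():
#           outputs = model(**batch)
#       predictions = outputs.logits.argmax(dim=-1)
#       predictions, references = accelerator.gather_for_metrics(
#           (predictions, batch["labels"])
#       )
#       metric.add_batch(predictions=predictions, references=references)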
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
            "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 233
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
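# Sketch of how this config pair is typically consumed; the export call below
# follows the `transformers.onnx` API as the author understands it, so treat it as
# an assumption rather than part of this module:
#
#   from pathlib import Path
#   from transformers import AutoModel, AutoTokenizer
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("xlm-mlm-en-2048")
#   model = AutoModel.from_pretrained("xlm-mlm-en-2048")
#   onnx_config = XLMOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("xlm.onnx"))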
| 233
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database. The caller is responsible
        for opening and closing the SQL connection."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
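# Illustrative usage through the public `datasets` API, which routes through the
# reader/writer classes above (the table and database names are made up):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_sql("my_table", "sqlite:///my.db")                              # SqlDatasetWriter
#   ds2 = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///my.db")   # SqlDatasetReader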
| 59
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__ : Any =[['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__ : Optional[Any] =[[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 
6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowercase )
self.assertListEqual(encoding.boxes , __lowercase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
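# Minimal usage sketch outside the test harness (the image path is an assumption;
# OCR output requires the `pytesseract` package to be installed):
#
#   from PIL import Image
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
#   encoding = processor(Image.open("invoice.png").convert("RGB"), return_tensors="pt")
#   # encoding.pixel_values has shape (1, 3, 224, 224);
#   # encoding.words / encoding.boxes hold the OCR results per image.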
| 296
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """Convert a PIL image (or list of PIL images) into a normalized tensor batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # skip the first (1 - strength) fraction of the schedule
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
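# Illustrative usage sketch (the checkpoint id, subfolder layout and file names are
# assumptions, not part of this file):
#
#   from PIL import Image
#   from diffusers import DDIMScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, timestep = pipe(image=Image.open("input.png"), strength=0.5, return_dict=False)
#   images[0].save("denoised.png")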
| 711
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack: best value using the first `i` items
    with remaining capacity `j`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            new_val = mf_knapsack(i - 1, wt, val, j)
        else:
            new_val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = new_val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack: returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp
def knapsack_with_example_solution(w, wt, val):
    """Solves the knapsack problem and also returns one example of an optimal
    subset of (1-based) item indices achieving the optimal value."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optimal_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optimal_set)

    return optimal_val, example_optimal_set
def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j),
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
A = [3, 2, 4, 4]
A = [4, 3, 2, 3]
A = 4
A = 6
A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A , A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A , A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 487
| 0
|
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Computes per-class intersection and union areas for a single prediction/ground-truth pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
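# Quick hand-computed sanity check of the function above: for pred = [0, 1, 1],
# gt = [0, 1, 0], num_labels = 2 and no ignored pixels, the per-class intersections
# are [1, 1], the per-class unions are [2, 2], so the per-category IoU is [0.5, 0.5].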
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulates intersection/union areas over a whole list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy from accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
| 78
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
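# A minimal sketch (hypothetical usage) of what the lazy indirection above buys:
# importing the package is cheap, and the heavy torch-backed symbols are only
# resolved on first attribute access, e.g.
#   from transformers.models.efficientnet import EfficientNetConfig  # lightweight
#   from transformers.models.efficientnet import EfficientNetModel   # pulls in torch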
| 343
| 0
|
"""simple docstring"""
def naive_pattern_search(s, pattern):
    """Return the start index of every occurrence of `pattern` in `s`."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
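# Complexity sketch (n = len(s), m = len(pattern)): the naive scan performs up to
# (n - m + 1) * m character comparisons, i.e. O(n * m) time with O(1) extra space,
# versus O(n + m) for a KMP-style prefix-function matcher.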
| 714
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8, num_detection_tokens=10):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
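        # Worked example with the defaults above (image_size=[30, 30], patch_size=2,
        # num_detection_tokens=10): num_patches = (30 // 2) * (30 // 2) = 225, so
        # expected_seq_len = 225 + 1 + 10 = 236.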
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
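        # Note: the logits carry num_labels + 1 classes because DETR-style detection
        # heads reserve one extra slot for the "no object" background class that
        # unmatched detection tokens are assigned to.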
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
def _UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
# YOLOS does not use inputs_embeds
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(a )
lowercase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
# in YOLOS, the seq_len is different
lowercase__ : Tuple = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : str = True
lowercase__ : List[str] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : str = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Optional[int] = True
lowercase__ : List[Any] = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Dict = len(a )
# Check attention is always last and order is fine
lowercase__ : Any = True
lowercase__ : int = True
lowercase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(a , a ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowercase__ : Tuple = outputs.attentions
self.assertEqual(len(a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _UpperCAmelCase ( self ) -> List[str]:
def check_hidden_states_output(a , a , a ):
lowercase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(a , a ) )
lowercase__ : int = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a ) , a )
# YOLOS has a different seq_length
lowercase__ : Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[Any] = True
check_hidden_states_output(a , a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*a )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : int = YolosModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : Optional[int] = prepare_img()
lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ : int = model(inputs.pixel_values )
# verify outputs
lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , )
lowercase__ : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) )
# verify postprocessing
lowercase__ : Optional[Any] = image_processor.post_process_object_detection(
a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a )
lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7]
lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a )
self.assertEqual(len(results['scores'] ) , 5 )
self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) )
self.assertSequenceEqual(results['labels'].tolist() , a )
self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
| 645
| 0
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase( self : Any )-> Dict:
"""simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys), 1004)
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def __lowercase( self : List[Any] )-> Optional[int]:
"""simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` as for `slow`,
        # because spm gives back the raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ : str = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
| 85
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def __snake_case ( self : List[str]) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __snake_case ( self : Optional[Any]) -> List[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = self.get_tokenizer()
A_ = self.get_image_processor()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
A_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
A_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
A_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __snake_case ( self : List[Any]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = self.prepare_image_inputs()
A_ = image_processor(_lowercase , return_tensors='np')
A_ = processor(images=_lowercase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __snake_case ( self : Any) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = processor(text=_lowercase)
A_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __snake_case ( self : str) -> Dict:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = 'test'
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'labels'])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __snake_case ( self : Union[str, Any]) -> Optional[int]:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.char_decode(_lowercase)
A_ = tokenizer.batch_decode(_lowercase)
A_ = [seq.replace(' ' , '') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __snake_case ( self : List[str]) -> str:
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
A_ = None
A_ = self.prepare_image_inputs()
A_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __snake_case ( self : List[str]) -> Any:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50_257)
        wp_input = torch.randn(1, 27, 30_522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
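        # The three random tensors above mimic logits from the processor's three
        # decoding heads: a 38-token character vocabulary, GPT-2's 50_257-token BPE
        # vocabulary and BERT's 30_522-token WordPiece vocabulary (hence the sizes).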
| 366
| 0
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
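# Hypothetical CLI usage via fire (the script and file names are assumptions):
#   python fp16_converter.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#   python fp16_converter.py pytorch_model.bin  # overwrites the source file in place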
| 710
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        })
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
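    # Example of the mapping above (class names are dataset-dependent and shown here
    # hypothetically): ClassLabel(names=["constant", "linear", ...]).str2int("linear") == 1.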
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names)
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb")
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 528
| 0
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE( a_ ):
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
snake_case__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(UpperCamelCase , 'num_heads' ) )
class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
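        # Worked example for the first stage with the defaults above (image_size=64,
        # patch_sizes[0]=7, patch_padding[0]=2, patch_stride[0]=4):
        # floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16, i.e. a 16 x 16 feature map.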
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[int] ) -> Optional[Any]:
snake_case__ = self.num_labels
snake_case__ = CvtForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Dict ) -> List[str]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCAmelCase_ ( self: Tuple ) -> Tuple:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCAmelCase_ ( self: Any ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Any ) -> str:
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(UpperCamelCase )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> List[Any]:
def check_hidden_states_output(UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Optional[Any] ):
snake_case__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
snake_case__ = outputs.hidden_states
snake_case__ = len(self.model_tester.depth )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self: Any ) -> List[Any]:
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self: int ) -> Tuple:
pass
@slow
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = CvtModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self: List[Any] ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case__ = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=UpperCamelCase , return_tensors='pt' ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ = model(**UpperCamelCase )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
snake_case__ = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
| 328
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
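        # Worked example of the shortest-edge resize above (assuming a 30 x 400 PIL
        # image, i.e. w=30, h=400, with shortest_edge=18): since w < h, the expected
        # width is 18 and the expected height is int(18 * 400 / 30) = 240.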
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
def lowerCAmelCase_ ( self: int ) -> Optional[int]:
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
snake_case__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
pass
def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase_ ( self: str ) -> Any:
# prepare image and target
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
snake_case__ = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
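        # Note: the processor returns boxes in normalized (center_x, center_y, width,
        # height) format, each coordinate scaled to [0, 1] relative to the image size.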
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
# prepare image, target and masks_path
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
snake_case__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case__ = ConditionalDetrImageProcessor(format='coco_panoptic' )
snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
| 328
| 1
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
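# Roughly, is_safetensors_compatible should return True only when every torch ``.bin``
# weight in the listing has a matching ``.safetensors`` counterpart (taking the variant
# into account when one is given); the cases below pin down the exact rules.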
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
def lowerCamelCase__ ( self : Dict ) -> str:
__magic_name__: Dict = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__magic_name__: Any = """fp16"""
self.assertFalse(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
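# A minimal sketch of the rule the cases above encode (illustrative only, not
# the library's actual implementation): a filename list is safetensors-
# compatible when every component folder that ships a PyTorch ".bin" weight
# also ships a ".safetensors" file. This folder-level check deliberately
# glosses over the variant-suffix matching details, but it reproduces every
# True/False outcome asserted above.
def sketch_is_safetensors_compatible(filenames, variant=None):
    bin_folders = {name.rpartition("/")[0] for name in filenames if name.endswith(".bin")}
    safe_folders = {name.rpartition("/")[0] for name in filenames if name.endswith(".safetensors")}
    return bin_folders <= safe_folders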
| 702
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = IFInpaintingSuperResolutionPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
return self._get_superresolution_dummy_components()
def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any=0 ) -> Dict:
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: int = torch.manual_seed(__snake_case )
else:
__magic_name__: List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Tuple = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: List[str] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : Dict ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase__ ( self : int ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
self._test_save_load_local()
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
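# The device branch in the dummy-input helper above exists because a
# device-bound torch.Generator is not supported on "mps" here, so the code
# falls back to the global CPU generator. A standalone restatement of that
# pattern (a sketch, with an assumed helper name):
import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds and returns the global CPU generator
    return torch.Generator(device=device).manual_seed(seed)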
| 213
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __a( _a ):
"""simple docstring"""
@staticmethod
@abstractmethod
def a__ ( _SCREAMING_SNAKE_CASE ) -> str:
raise NotImplementedError()
@abstractmethod
def a__ ( self ) -> int:
raise NotImplementedError()
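# The two abstract methods above form the usual CLI command pattern: a static
# method registers the subcommand on an argparse subparsers action, and an
# instance method executes it. A self-contained sketch with assumed
# conventional names (register_subcommand / run):
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EnvCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("env", help="Print environment info")
        sub.set_defaults(factory=lambda args: EnvCommand())

    def run(self):
        import platform
        print(f"Python {platform.python_version()}")

# parser = ArgumentParser("cli"); EnvCommand.register_subcommand(parser.add_subparsers())
# args = parser.parse_args(["env"]); args.factory(args).run()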
| 30
|
def _a ( lowerCAmelCase , lowerCAmelCase )-> List[str]:
SCREAMING_SNAKE_CASE_ = [1]
for i in range(2 , lowerCAmelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = list(range(lowerCAmelCase ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ = factorials.pop()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = divmod(lowerCAmelCase , lowerCAmelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
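# A readable restatement of the routine above (assumed name kth_permutation;
# the obfuscated assignment targets stand for the names used in the body).
# It walks the factorial number system: each factorial "digit" of k selects
# which of the remaining elements comes next.
def kth_permutation(k: int, n: int) -> list:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation

# kth_permutation(0, 4) -> [0, 1, 2, 3]; kth_permutation(23, 4) -> [3, 2, 1, 0]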
| 360
| 0
|
def A_ ( ) ->str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
SCREAMING_SNAKE_CASE = 6
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1_9_0_1
SCREAMING_SNAKE_CASE = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
SCREAMING_SNAKE_CASE = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
SCREAMING_SNAKE_CASE = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
SCREAMING_SNAKE_CASE = day - days_per_month[month - 2]
if month > 1_2:
year += 1
SCREAMING_SNAKE_CASE = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
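# Cross-check for the day-counting loop above using the standard library
# (calendar.weekday returns 6 for Sunday): both approaches count the Sundays
# falling on the first of a month during the twentieth century
# (1 Jan 1901 to 31 Dec 2000) and should agree on 171.
import calendar

def solution_with_calendar() -> int:
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if calendar.weekday(year, month, 1) == 6
    )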
| 259
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class a_( unittest.TestCase ):
"""simple docstring"""
def __UpperCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/'))
SCREAMING_SNAKE_CASE = self.transformer_dir
shutil.copy(
os.path.join(lowerCAmelCase__ , 'src/transformers/models/bert/modeling_bert.py') , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py') , )
def __UpperCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'src/transformers'
shutil.rmtree(self.transformer_dir)
def __UpperCamelCase ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
SCREAMING_SNAKE_CASE = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9)
SCREAMING_SNAKE_CASE = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__)
SCREAMING_SNAKE_CASE = os.path.join(self.transformer_dir , 'new_code.py')
with open(lowerCAmelCase__ , 'w' , newline='\n') as f:
f.write(lowerCAmelCase__)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase__)) == 0)
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase__)
with open(lowerCAmelCase__ , 'r') as f:
self.assertTrue(f.read() , lowerCAmelCase__)
def __UpperCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
def __UpperCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowerCAmelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowerCAmelCase__) , )
# Copy consistency with a really long name
SCREAMING_SNAKE_CASE = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , lowerCAmelCase__ , lowerCAmelCase__) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowerCAmelCase__ , overwrite_result=re.sub('Bert' , 'TestModel' , lowerCAmelCase__) , )
def __UpperCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = check_copies.LOCALIZED_READMES['README_zh-hans.md']
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
self.assertFalse(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
# Check that the number of models matches the original README.md after conversion.
self.assertTrue(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
lowerCAmelCase__ , lowerCAmelCase__ , localized_readme['format_model_list'])
# Check if the model link is synchronized.
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
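# The copy-consistency mechanism exercised above boils down to: take the
# reference implementation, apply any "Old->New" renames declared in the
# "# Copied from ... with Old->New" comment, and diff the result against the
# copy. A minimal sketch (illustrative, not the repo's check_copies internals):
import re

def sketch_is_copy_consistent(reference, observed, replacements=None):
    expected = reference
    if replacements:
        old, new = replacements.split("->")
        expected = re.sub(old, new, expected)
    return expected.strip() == observed.strip()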
| 259
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any=7 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Optional[Any]=1_8 , lowerCamelCase__ : Optional[int]=3_0 , lowerCamelCase__ : Optional[int]=4_0_0 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : List[Any]=3_2 , lowerCamelCase__ : List[str]=True , ):
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : str = image_size
lowerCAmelCase : Optional[Any] = min_resolution
lowerCAmelCase : str = max_resolution
lowerCAmelCase : Optional[Any] = do_resize
lowerCAmelCase : Tuple = size_divisor
lowerCAmelCase : Optional[int] = do_rescale
def _A ( self : List[str] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __magic_name__ ( snake_case, unittest.TestCase ):
_lowerCAmelCase = GLPNImageProcessor if is_vision_available() else None
def _A ( self : List[Any] ):
lowerCAmelCase : List[str] = GLPNImageProcessingTester(self )
@property
def _A ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : Optional[Any] ):
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size_divisor''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''resample''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_rescale''' ) )
def _A ( self : List[str] ):
pass
def _A ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test non-batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _A ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test non-batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _A ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test non-batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
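# The shape assertions above only require both spatial dimensions to be
# multiples of size_divisor; flooring to the nearest multiple (an assumed
# strategy, one of several that would satisfy the tests) looks like this:
def round_to_multiple(height: int, width: int, size_divisor: int = 32):
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

# round_to_multiple(480, 640) -> (480, 640); round_to_multiple(401, 333) -> (384, 320)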
| 348
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def _A ( *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ):
pass
def UpperCAmelCase__ ( __magic_name__ : Image ):
'''simple docstring'''
lowerCAmelCase : List[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def UpperCAmelCase__ ( __magic_name__ : Image ):
'''simple docstring'''
lowerCAmelCase : Tuple = np.array(__magic_name__ )
lowerCAmelCase : Dict = npimg.shape
return {"hash": hashimage(__magic_name__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
_lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _A ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ):
lowerCAmelCase : List[str] = MaskGenerationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _A ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def _A ( self : Optional[int] ):
pass
@slow
@require_torch
def _A ( self : Optional[int] ):
lowerCAmelCase : Dict = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
lowerCAmelCase : Optional[Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_5_6 )
# Shortening by hashing
lowerCAmelCase : List[Any] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _A ( self : Any ):
lowerCAmelCase : List[str] = '''facebook/sam-vit-huge'''
lowerCAmelCase : List[str] = pipeline('''mask-generation''' , model=lowerCamelCase__ )
lowerCAmelCase : List[Any] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
lowerCAmelCase : str = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(lowerCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_8_0, 6_4_0)}, '''scores''': 1.0_0_5_3},
] , )
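# A readable version of the hashing helper defined at the top of this file
# (assuming its mangled "hashlib.mda" stands for hashlib.md5): a short digest
# of the raw pixel bytes lets the tests compare large masks without embedding
# full arrays in the expectations.
import hashlib
from PIL import Image

def hash_image(image: Image.Image) -> str:
    return hashlib.md5(image.tobytes()).hexdigest()[:10]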
| 348
| 1
|
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
__a = [False] * len(__SCREAMING_SNAKE_CASE )
__a = []
queue.append(__SCREAMING_SNAKE_CASE )
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__SCREAMING_SNAKE_CASE )
__a = True
__a = u
return visited[t]
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
__a = [-1] * (len(__SCREAMING_SNAKE_CASE ))
__a = 0
while bfs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__a = float("""Inf""" )
__a = sink
while s != source:
# Find the minimum residual capacity along the selected augmenting path
__a = min(__SCREAMING_SNAKE_CASE , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
return max_flow
SCREAMING_SNAKE_CASE_ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0, 5
print(ford_fulkerson(graph, source, sink))
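# The expected output above is 23, the max flow of this classic CLRS network.
# A second, tiny network to sanity-check the implementation (reading the
# obfuscated assignment targets as the names used in the function bodies):
small_graph = [
    [0, 3, 2, 0],  # source -> a (cap 3), source -> b (cap 2)
    [0, 0, 0, 2],  # a -> sink (cap 2)
    [0, 0, 0, 3],  # b -> sink (cap 3)
    [0, 0, 0, 0],  # sink
]
# ford_fulkerson(small_graph, 0, 3) should return 4, the min cut {s, a} | {b, t}.
# Note the graph is mutated in place, so pass a fresh copy for repeated runs.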
| 201
|
'''simple docstring'''
def __lowercase ( __SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
__a = [int(__SCREAMING_SNAKE_CASE ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(__SCREAMING_SNAKE_CASE ) == 4 and all(0 <= int(__SCREAMING_SNAKE_CASE ) <= 254 for octet in octets )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input().strip()
SCREAMING_SNAKE_CASE_ = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 201
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = 42
class lowerCAmelCase ( snake_case__ , snake_case__ ):
'''simple docstring'''
A = True
@register_to_config
def __init__( self :str , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 3 , lowerCamelCase_ :Tuple[str] = ("DownEncoderBlock2D",) , lowerCamelCase_ :Tuple[str] = ("UpDecoderBlock2D",) , lowerCamelCase_ :Tuple[int] = (6_4,) , lowerCamelCase_ :int = 1 , lowerCamelCase_ :str = "silu" , lowerCamelCase_ :int = 4 , lowerCamelCase_ :int = 3_2 , lowerCamelCase_ :int = 3_2 , lowerCamelCase_ :float = 0.18_215 , ) -> Dict:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
UpperCamelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
# pass init params to Decoder
UpperCamelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , act_fn=lowerCamelCase_ , )
UpperCamelCase__ = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCamelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
UpperCamelCase__ = False
UpperCamelCase__ = False
# only relevant if vae tiling is enabled
UpperCamelCase__ = self.config.sample_size
UpperCamelCase__ = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCamelCase__ = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCamelCase__ = 0.25
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[Any]=False ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase_ , (Encoder, Decoder) ):
UpperCamelCase__ = value
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :bool = True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ = use_tiling
def lowerCamelCase__ ( self :Optional[int] ) -> Dict:
"""simple docstring"""
self.enable_tiling(lowerCamelCase_ )
def lowerCamelCase__ ( self :Dict ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = True
def lowerCamelCase__ ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self :Dict ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
UpperCamelCase__ = {}
def fn_recursive_add_processors(lowerCamelCase_ :str , lowerCamelCase_ :torch.nn.Module , lowerCamelCase_ :Dict[str, AttentionProcessor] ):
if hasattr(lowerCamelCase_ , "set_processor" ):
UpperCamelCase__ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , lowerCamelCase_ , lowerCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return processors
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Any:
"""simple docstring"""
UpperCamelCase__ = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and len(lowerCamelCase_ ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(lowerCamelCase_ )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(lowerCamelCase_ :str , lowerCamelCase_ :torch.nn.Module , lowerCamelCase_ :List[str] ):
if hasattr(lowerCamelCase_ , "set_processor" ):
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
module.set_processor(lowerCamelCase_ )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , lowerCamelCase_ , lowerCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCamelCase__ ( self :int , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowerCamelCase_ , return_dict=lowerCamelCase_ )
if self.use_slicing and x.shape[0] > 1:
UpperCamelCase__ = [self.encoder(lowerCamelCase_ ) for x_slice in x.split(1 )]
UpperCamelCase__ = torch.cat(lowerCamelCase_ )
else:
UpperCamelCase__ = self.encoder(lowerCamelCase_ )
UpperCamelCase__ = self.quant_conv(lowerCamelCase_ )
UpperCamelCase__ = DiagonalGaussianDistribution(lowerCamelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase_ )
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowerCamelCase_ , return_dict=lowerCamelCase_ )
UpperCamelCase__ = self.post_quant_conv(lowerCamelCase_ )
UpperCamelCase__ = self.decoder(lowerCamelCase_ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
@apply_forward_hook
def lowerCamelCase__ ( self :List[Any] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
UpperCamelCase__ = [self._decode(lowerCamelCase_ ).sample for z_slice in z.split(1 )]
UpperCamelCase__ = torch.cat(lowerCamelCase_ )
else:
UpperCamelCase__ = self._decode(lowerCamelCase_ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = min(a.shape[2] , b.shape[2] , lowerCamelCase_ )
for y in range(lowerCamelCase_ ):
UpperCamelCase__ = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = min(a.shape[3] , b.shape[3] , lowerCamelCase_ )
for x in range(lowerCamelCase_ ):
UpperCamelCase__ = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
UpperCamelCase__ = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCamelCase__ = []
for i in range(0 , x.shape[2] , lowerCamelCase_ ):
UpperCamelCase__ = []
for j in range(0 , x.shape[3] , lowerCamelCase_ ):
UpperCamelCase__ = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCamelCase__ = self.encoder(lowerCamelCase_ )
UpperCamelCase__ = self.quant_conv(lowerCamelCase_ )
row.append(lowerCamelCase_ )
rows.append(lowerCamelCase_ )
UpperCamelCase__ = []
for i, row in enumerate(lowerCamelCase_ ):
UpperCamelCase__ = []
for j, tile in enumerate(lowerCamelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , lowerCamelCase_ , lowerCamelCase_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , lowerCamelCase_ , lowerCamelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase_ , dim=3 ) )
UpperCamelCase__ = torch.cat(lowerCamelCase_ , dim=2 )
UpperCamelCase__ = DiagonalGaussianDistribution(lowerCamelCase_ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowerCamelCase_ )
def lowerCamelCase__ ( self :int , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
UpperCamelCase__ = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCamelCase__ = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCamelCase__ = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCamelCase__ = []
for i in range(0 , z.shape[2] , lowerCamelCase_ ):
UpperCamelCase__ = []
for j in range(0 , z.shape[3] , lowerCamelCase_ ):
UpperCamelCase__ = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCamelCase__ = self.post_quant_conv(lowerCamelCase_ )
UpperCamelCase__ = self.decoder(lowerCamelCase_ )
row.append(lowerCamelCase_ )
rows.append(lowerCamelCase_ )
UpperCamelCase__ = []
for i, row in enumerate(lowerCamelCase_ ):
UpperCamelCase__ = []
for j, tile in enumerate(lowerCamelCase_ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCamelCase__ = self.blend_v(rows[i - 1][j] , lowerCamelCase_ , lowerCamelCase_ )
if j > 0:
UpperCamelCase__ = self.blend_h(row[j - 1] , lowerCamelCase_ , lowerCamelCase_ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(lowerCamelCase_ , dim=3 ) )
UpperCamelCase__ = torch.cat(lowerCamelCase_ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def lowerCamelCase__ ( self :Any , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = False , lowerCamelCase_ :bool = True , lowerCamelCase_ :Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
UpperCamelCase__ = sample
UpperCamelCase__ = self.encode(lowerCamelCase_ ).latent_dist
if sample_posterior:
UpperCamelCase__ = posterior.sample(generator=lowerCamelCase_ )
else:
UpperCamelCase__ = posterior.mode()
UpperCamelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
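# The seam-hiding trick used by the tiled paths above, restated as a
# self-contained function (it mirrors the horizontal blend: linearly
# cross-fade the last blend_extent columns of the left tile into the first
# columns of the right tile so adjacent tiles join without a visible seam):
import torch

def blend_h(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        weight = x / blend_extent
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - weight) + b[:, :, :, x] * weight
    return b

# With left = zeros, right = ones, and blend_extent = 4, the right tile's
# first four columns become 0.00, 0.25, 0.50, 0.75.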
| 516
|
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def snake_case__ ( _snake_case : List[Any]=32 , _snake_case : Tuple=10 , _snake_case : str=1_00 , _snake_case : Optional[int]=10_26 , _snake_case : Any=True , _snake_case : str="data/tokenized_stories_train_wikitext103.jbl" , _snake_case : Any="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
UpperCamelCase__ , UpperCamelCase__ = generate_datasets(
_snake_case , _snake_case , number=_snake_case , min_len=10_26 , trim=_snake_case )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
UpperCamelCase__ = load_gpta("gpt2" ).to(_snake_case )
print("computing perplexity on objective set" )
UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ).item()
print("perplexity on objective set:" , _snake_case )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def snake_case__ ( _snake_case : Any , _snake_case : str=15 , _snake_case : str=1_28 , _snake_case : int=1_00 , _snake_case : Tuple="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
UpperCamelCase__ = SecondaryLearner(_snake_case )
# Train secondary learner
UpperCamelCase__ = train_secondary_learner(
_snake_case , _snake_case , max_epochs=_snake_case , batch_size=_snake_case , eval_freq=1_00 , igf_model_path=_snake_case , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def snake_case__ ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : List[str]=32 , _snake_case : Tuple=10_00 , _snake_case : List[Any]=16 , _snake_case : str=1.0 , _snake_case : List[str]=recopy_gpta , _snake_case : Optional[int]=None , _snake_case : Optional[int]=10 , _snake_case : Optional[int]="gpt2_finetuned.pt" , ):
"""simple docstring"""
UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
UpperCamelCase__ = RandomSampler(_snake_case )
UpperCamelCase__ = DataLoader(_snake_case , sampler=_snake_case )
UpperCamelCase__ = max_steps // (len(_snake_case )) + 1
UpperCamelCase__ = 0
UpperCamelCase__ = torch.zeros((1, context_len) , dtype=torch.long , device=_snake_case )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = recopy_model(_snake_case , _snake_case , _snake_case )
model.train()
if secondary_learner is not None:
secondary_learner.to(_snake_case )
secondary_learner.eval()
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = []
UpperCamelCase__ = []
# Compute the performance of the transformer model at the beginning
UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case )
test_perps.append(_snake_case )
print("Test perplexity, step" , _snake_case , ":" , _snake_case )
for epoch in range(int(_snake_case ) ):
for step, example in enumerate(_snake_case ):
torch.cuda.empty_cache()
UpperCamelCase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
UpperCamelCase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCamelCase__ = model(_snake_case , labels=_snake_case )
UpperCamelCase__ = True
if secondary_learner is not None:
UpperCamelCase__ = secondary_learner.forward(
torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_snake_case ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCamelCase__ = -1
if predicted_q < threshold:
UpperCamelCase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
UpperCamelCase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCamelCase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case )
test_perps.append(_snake_case )
print("Test perplexity, step" , _snake_case , ":" , _snake_case )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _snake_case )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_snake_case , type=_snake_case , required=_snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_snake_case , default=_snake_case , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_snake_case , default=_snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_snake_case , type=_snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_snake_case , default=_snake_case , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=_snake_case , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_00 , type=_snake_case , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_00 , type=_snake_case , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=10_00 , type=_snake_case , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_28 , type=_snake_case , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_snake_case , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=_snake_case , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_00 , type=_snake_case , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=10_26 , type=_snake_case , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_snake_case , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=_snake_case , type=_snake_case , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=_snake_case , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_snake_case , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=_snake_case , type=_snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_snake_case , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
UpperCamelCase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
UpperCamelCase__ = training_secondary_learner(
_snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase__ , UpperCamelCase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=_snake_case )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_snake_case , _snake_case , _snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_snake_case , secondary_learner=_snake_case , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
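# The heart of the IGF loop above is the filtering decision: the secondary
# learner scores each sampled context, and only contexts whose predicted
# information gain clears the threshold contribute a gradient. A minimal
# restatement (names illustrative; the threshold drops to -1 after ten
# batches, which all but disables the filter):
def keep_context(predicted_ig: float, global_step: int, threshold: float = 1.0) -> bool:
    if global_step >= 10:
        threshold = -1.0
    return predicted_ig >= threshold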
| 516
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase_ = TypeVar('_T')
class snake_case ( Generic[_T] ):
'''simple docstring'''
def __init__( self : Dict, _lowerCamelCase : Iterable[_T] | None = None ):
'''simple docstring'''
__A = list(iterable or [] )
__A = []
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__( self : int ):
'''simple docstring'''
return f'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : _T ):
'''simple docstring'''
self._stacka.append(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self._stacka.pop
__A = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
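# A readable restatement of the two-stack queue above (reading the obfuscated
# attribute names as inbox/outbox): enqueues push onto the inbox; dequeues pop
# from the outbox, refilling it by draining the inbox only when it runs dry.
# Each element moves between stacks at most once, so dequeue is amortized O(1)
# despite the occasional O(n) refill.
class TwoStackQueue:
    def __init__(self):
        self._inbox = []
        self._outbox = []

    def put(self, item) -> None:
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()

# q = TwoStackQueue(); q.put(1); q.put(2); q.get() -> 1; q.get() -> 2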
| 718
|
"""simple docstring"""
lowercase_ = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
lowercase_ = ['a', 'b', 'c', 'd', 'e']
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = start
# add current to visited
visited.append(__UpperCamelCase )
__A = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__A = topological_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# once all neighbors are visited, add current to sort
sort.append(__UpperCamelCase )
# if not all vertices have been visited, select a new one to visit
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
for vertice in vertices:
if vertice not in visited:
__A = topological_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# return sort
return sort
if __name__ == "__main__":
lowercase_ = topological_sort('a', [], [])
print(sort)
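# Note that the post-order appends above yield the *reverse* of a topological
# ordering (sinks first): for the sample graph the result is
# ['c', 'd', 'e', 'b', 'a']. A restatement that adds the conventional final
# reversal:
def topo_order(vertices, edges):
    visited, order = set(), []

    def dfs(node):
        visited.add(node)
        for neighbor in edges[node]:
            if neighbor not in visited:
                dfs(neighbor)
        order.append(node)  # post-order: appended after all descendants

    for vertex in vertices:
        if vertex not in visited:
            dfs(vertex)
    return order[::-1]

# topo_order(vertices, edges) on the sample data -> ['a', 'b', 'e', 'd', 'c']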
| 215
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """megatron-bert"""
def __init__(self , __a=29056 , __a=1024 , __a=24 , __a=16 , __a=4096 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=0 , __a="absolute" , __a=True , **__a , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=__a , **__a )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = use_cache
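# Usage sketch (reading the mangled class attribute above, the registered
# model_type is "megatron-bert"; the conventional public class name, assumed
# here, is MegatronBertConfig):
# config = MegatronBertConfig()   # defaults: 29056 vocab, 1024 hidden, 24 layers
# config.num_attention_heads      # 16, i.e. a per-head size of 1024 / 16 = 64
# config.intermediate_size        # 4096 = 4 * hidden_size, the usual BERT ratio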
| 146
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1002 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self ):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
    @cached_property
    def big_tokenizer(self ):
        """simple docstring"""
        return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def test_picklable_without_disk(self ):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols(self ):
        """simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols(self ):
        """simple docstring"""
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
UpperCAmelCase__ = {'input_ids': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
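# to run just this test module locally (a sketch; the path follows the usual transformers
# repository layout — adjust it to your checkout):
#   python -m pytest tests/models/xlm_roberta/test_tokenization_xlm_roberta.py -q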
def depth_first_search( grid: list[list[int]] , row: int , col: int , visit: set )-> int:
    """simple docstring"""
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
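# a minimal usage sketch (the 3x3 grid is a made-up example; 1 marks a blocked cell)
maze = [
    [0, 0, 0],
    [1, 1, 0],
    [0, 0, 0],
]
# number of simple 4-directional paths from the top-left to the bottom-right corner
print(depth_first_search(maze, 0, 0, set()))  # 1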
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_offset = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
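# a minimal usage sketch (assumes the `sentencepiece` extra is installed; the checkpoint name is
# taken from the pretrained map above and exercises the tokenizer class defined here):
from transformers import BarthezTokenizer

barthez_tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
encoded = barthez_tok("Le camembert est délicieux.")["input_ids"]
print(barthez_tok.convert_ids_to_tokens(encoded))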
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
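# a minimal usage sketch (downloads the checkpoint named in the maps above; requires network access):
from transformers import ConvBertTokenizerFast

convbert_tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
print(convbert_tok("ConvBERT uses a standard WordPiece vocabulary.")["input_ids"])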
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path( pred_path, tgt_path, save_path=None, **kwargs ):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs )
    if save_path is not None:
        save_json(metrics, save_path, indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
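# example command-line usage via fire (a sketch; the script and file names below are made up
# for illustration):
#   python rouge_cli.py predictions.txt gold_targets.txt --save_path rouge_metrics.json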
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__( self , vocab_size=50_277 , context_length=1_024 , hidden_size=4_096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
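# a quick sketch of the attribute_map defined above: max_position_embeddings is aliased to
# context_length, so either name reads the same value (assumes a transformers version with RWKV)
from transformers import RwkvConfig

rwkv_cfg = RwkvConfig(context_length=2048)
print(rwkv_cfg.max_position_embeddings)  # 2048, resolved through the attribute_map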
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__UpperCamelCase : Dict = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__UpperCamelCase )
class RagConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'rag'
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''' )
        question_encoder_model_type = question_encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''generator''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , '''forced_eos_token_id''' , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config: PretrainedConfig , generator_config: PretrainedConfig , **kwargs ):
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''question_encoder'''] = self.question_encoder.to_dict()
        output['''generator'''] = self.generator.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
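# composition sketch: a RagConfig is assembled from a question-encoder config and a generator
# config (DPR and BART are the usual pairing; any PretrainedConfig subclasses work)
from transformers import BartConfig, DPRConfig, RagConfig

rag_cfg = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
print(rag_cfg.generator.model_type)  # 'bart'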
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id ,filename ,repo_type="dataset" ) ) ,"r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" ,"dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" ,"dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" ,"patch_embeddings" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" ,"position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" ,"attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" ,"projection" )
    if "blocks" in name:
        name = name.replace("blocks" ,"layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" ,"intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" ,"output.dense" )
    if "norm1" in name:
        name = name.replace("norm1" ,"layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" ,"layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" ,"head" )
    if "scratch" in name:
        name = name.replace("scratch" ,"neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" ,"convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" ,"convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" ,"convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" ,"convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''' ,f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace("out_conv" ,"projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" ,"residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" ,"residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" ,"convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" ,"convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" ,"neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" ,"neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" ,"neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" ,"neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" ,"neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" ,"neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" ,"neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" ,"neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" ,"neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" ,"neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" ,"neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" ,"dpt" )
    if "bn" in name:
        name = name.replace("bn" ,"batch_norm" )
    if "head" in name:
        name = name.replace("head" ,"head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" ,"layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" ,"auxiliary_head.head" )
    return name
def read_in_q_k_v( state_dict ,config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub ,model_name ):
    '''simple docstring'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict ,config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image ,return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] ,expected_slice ,atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] ,expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model to hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="nielsr" ,commit_message="Add model" ,use_temp_dir=True ,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="nielsr" ,commit_message="Add image processor" ,use_temp_dir=True ,)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
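# example invocation (a sketch; the script name is assumed from the usual repository layout, and
# --checkpoint_url falls back to the DPT-large default declared above when omitted):
#   python convert_dpt_to_pytorch.py --pytorch_dump_folder_path ./dpt-large --model_name dpt-large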
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u: float ,p: int ) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1 ,p ):
        temp = temp * (u - i)
    return temp
def main():
    '''simple docstring'''
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print("enter the values of parameters in a list: " )
    x = list(map(int ,input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 ,n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 ,n ):
        summ += (ucal(u ,i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
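# a quick worked check of ucal (a sketch, not part of the original script): with u = 0.5 and
# p = 3 the product is 0.5 * (0.5 - 1) * (0.5 - 2) = 0.375
assert math.isclose(ucal(0.5, 3), 0.375)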
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    """simple docstring"""
    def __init__(self , parent , vocab_size=1_00 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        """simple docstring"""
        self.parent = parent
        self.vocab_size = 1_00
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self ):
        """simple docstring"""
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": BeitModel,
            """image-classification""": BeitForImageClassification,
            """image-segmentation""": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""BEiT does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
        """simple docstring"""
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self ):
        """simple docstring"""
        pass
    def test_model_common_attributes(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training(self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self ):
        """simple docstring"""
        model = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 1_96) , dtype=torch.bool ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1_96, 81_92) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
@slow
    def test_inference_image_classification_head_imagenet_1k(self ):
        """simple docstring"""
        model = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2_81
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor([1.6881, -0.2787, 0.5901] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 23_96
self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
SCREAMING_SNAKE_CASE__ : Dict = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE__ , size=6_40 , do_center_crop=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.open(ds[0]["""file"""] )
SCREAMING_SNAKE_CASE__ : int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=SCREAMING_SNAKE_CASE__ , )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
SCREAMING_SNAKE_CASE__ : Any = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE__ , size=6_40 , do_center_crop=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[0]["""file"""] )
SCREAMING_SNAKE_CASE__ : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ , target_sizes=[(5_00, 3_00)] )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
| 545
|
"""simple docstring"""
import math
import os
import sys
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = """"""
try:
with open(_snake_case ,"""rb""" ) as binary_file:
SCREAMING_SNAKE_CASE__ : str = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE__ : int = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
lexicon.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = last_match_id
if math.loga(_snake_case ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE__ : Optional[int] = """0""" + lexicon[curr_key]
SCREAMING_SNAKE_CASE__ : Tuple = bin(_snake_case )[2:]
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = {"""0""": """0""", """1""": """1"""}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = """""", """"""
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
for i in range(len(_snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE__ : Dict = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_snake_case ,_snake_case ,_snake_case ,_snake_case )
index += 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE__ : Optional[int] = lexicon[curr_string]
result += last_match_id
return result
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : str = os.path.getsize(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = bin(_snake_case )[2:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(_snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = 8
try:
with open(_snake_case ,"""wb""" ) as opened_file:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
to_write[i : i + byte_length]
for i in range(0 ,len(_snake_case ) ,_snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_snake_case ,2 ).to_bytes(1 ,byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = read_file_binary(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = compress_data(_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = add_file_length(_snake_case ,_snake_case )
write_file_binary(_snake_case ,_snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
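# Example invocation (illustrative file names, not part of the original script):
#   python lzw_compress.py input.bin output.lzw
# reads input.bin, LZW-compresses its bit string, and writes the result to output.lzw.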
| 545
| 1
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 693
|
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # either extend the best sum that excluded the previous element,
        # or carry forward the best sum seen so far
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
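# Worked example (added for illustration): for nums = [1, 2, 4, 5, 9] the best
# non-adjacent choice is 1 + 4 + 9 = 14, and the recurrence above returns 14.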
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241
| 0
|
import os


def solution() -> int:
    """Return the greatest product of four adjacent numbers in the 20x20 grid
    (horizontally, vertically, or diagonally) read from grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 345
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 345
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
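# Note: the `_LazyModule` indirection above defers the heavy torch-dependent
# imports until an attribute of the module is first accessed.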
| 560
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """
        Processes audio and text input, as well as audio and text targets.
        """
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """
        Collates audio and text inputs, as well as their targets, into a padded batch.
        """
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's [`~PreTrainedTokenizer.batch_decode`]."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's [`~PreTrainedTokenizer.decode`]."""
        return self.tokenizer.decode(*args, **kwargs)
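# Illustrative usage (added note, not part of the original file): a processor built
# from pretrained components prepares ASR-style inputs and targets in one call, e.g.
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#   batch = processor(audio=waveform, sampling_rate=16000, text_target="hello", return_tensors="pt")
# which yields `input_values`, `labels`, and `decoder_attention_mask`.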
| 560
| 1
|
"""
Eight-queens puzzle solved with backtracking: place one queen per row,
checking the row, column, and both diagonals before recursing.
"""
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks board[row][column]."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
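# For reference: with n = 8 the backtracking above enumerates the classic 92 solutions.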
| 719
|
"""
Rotate (affine-transform) an image with OpenCV and plot the results.
"""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Get image rotation via the affine transform mapping pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # (the exact point-pair combinations were lost in obfuscation; any three
    # distinct pairings of the four point sets produce three rotations)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
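# Note: cv2.getAffineTransform needs exactly three point correspondences; the
# resulting 2x3 matrix encodes the unique affine map sending each source point
# to its destination.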
| 271
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
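# Quick usage sketch (illustrative, not part of the original file):
#   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tokenizer("sample text")["input_ids"]
# Decoding with `convert_tokens_to_string` strips the "@@ " continuation markers again.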
| 95
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
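# Example of the config-driven factory above (hypothetical config dict):
#   config = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
#   layer = instantiate_from_config(config)  # equivalent to torch.nn.Linear(4, 2)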
| 555
| 0
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort input_list in place with odd-even transposition sort (brick sort)."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
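# Trace (added for illustration): [3, 1, 2] -> the even pass swaps 3,1 giving [1, 3, 2];
# the odd pass swaps 3,2 giving [1, 2, 3]; the next sweep makes no swaps, so the loop exits.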
if __name__ == "__main__":
print('Enter list to be sorted')
lowerCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowerCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 411
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration from a pretrained model checkpoint, returning `{}` if none is found."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    This is a generic image processor class that will be instantiated as one of the image processor classes of the
    library when created with the [`AutoImageProcessor.from_pretrained`] class method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """
        Register a new image processor class for a given config class.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
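# Typical usage (illustrative):
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")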
| 411
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    """Read a label file, mapping each non-empty line's number to its first word."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 13
|
"""
Build a markdown index of all code files in a directory tree.
"""
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
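# Sample output shape (illustrative): top-level directories become "## Heading"
# lines and files become "  * [Name](path)" bullets, ready to paste into a
# DIRECTORY.md-style index.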
| 369
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
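# fairseq vocabulary ids of the "en_XX" and "ro_RO" language-code tokens used in the tests below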
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
@slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase__ = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2")
    def test_save_slow_from_fast_and_reload_fast(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
'''simple docstring'''
lowerCAmelCase_ = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase_ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
return cls
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
def __lowerCamelCase ( self ):
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ["this is gunna be a long sentence " * 2_0]
assert isinstance(src_text[0] , snake_case_ )
lowerCamelCase__ = 1_0
lowerCamelCase__ = self.tokenizer(snake_case_ , max_length=snake_case_ , truncation=snake_case_ ).input_ids[0]
self.assertEqual(ids[0] , snake_case_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
def __lowerCamelCase ( self ):
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
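        # language code comes first and </s> (id 2) comes last, on both the encoder inputs and the labels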
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __lowerCamelCase ( self ):
'''simple docstring'''
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 720
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 29
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
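# make torch/cuDNN fully deterministic so the expected pixel slices asserted below are reproducible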
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet_upscale(self):
torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100)
        return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np")
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np")
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np").images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 211
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''', [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_337, num_examples=42, dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_337, num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
], )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 467
| 0
|
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 600
|
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
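# checkpoint variant being converted; also used to build the default --checkpoint_path below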
UpperCAmelCase: str = """base_with_context"""
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
_lowercase : Optional[Any] = weights[F"""layers_{lyr_num}"""]
_lowercase : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[Any] = ly_weight["""attention"""]
_lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
for lyr_num, lyr in enumerate(model.encoders ):
_lowercase : int = weights[F"""layers_{lyr_num}"""]
_lowercase : Any = ly_weight["""attention"""]
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))
for lyr_num, lyr in enumerate(model.decoders ):
_lowercase : List[Any] = weights[F"""layers_{lyr_num}"""]
_lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
_lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_lowercase : List[Any] = ly_weight["""self_attention"""]
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : Union[str, Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
_lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
_lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
_lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
_lowercase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
_lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
_lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
_lowercase : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
_lowercase : Any = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
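        # encoder/decoder dimensions below are read from the loaded t5x model's module config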
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
    decoder = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
| 600
| 1
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
'''simple docstring'''
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range")
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02)
        torch.manual_seed(0)
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 441
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 376
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
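# helpers to map a PyTorch state dict onto a Flax model's nested parameter tree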
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
| 712
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """Calculate the gamma of a positive integer or half-integer."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 131
| 0
|
'''simple docstring'''
from collections.abc import Sequence
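# Kadane's algorithm: one pass over the array, O(n) time and O(1) extra space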
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 13
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : str = logging.get_logger(__name__)
A__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A__ : Tuple = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
A__ : str = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
A__ : Tuple = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Dict = RoFormerTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
):
__lowerCamelCase : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) )
__lowerCamelCase : Union[str, Any] = do_lower_case
__lowerCamelCase : str = strip_accents
__lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = do_lower_case
def __getstate__( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = self.__dict__.copy()
__lowerCamelCase : Dict = BertPreTokenizer()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = d
__lowerCamelCase : List[Any] = self.__dict__['_tokenizer'].get_vocab()
__lowerCamelCase : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 13
| 1
|
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal):
    """simple docstring"""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
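# Illustrative values for the function above: decimal_to_hexadecimal(5973) returns
# "0x1755", decimal_to_hexadecimal(-256) returns "-0x100", and (as written) an input
# of 0 yields the bare prefix "0x".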
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 54
| 0
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """simple docstring"""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 0
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """simple docstring"""

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 406
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
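# The three directory constants above are intentionally blank in the source: point
# LABEL_DIR at YOLO-format .txt annotations, IMG_DIR at the matching .jpg images, and
# OUTPUT_DIR at a writable folder before calling main().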
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 222
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 222
| 1
|
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
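# Illustrative values: solution(1, 10) returns 7, since 1/7 has the longest recurring
# cycle among small denominators; for the default limit of 1000 the result is 983.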
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98
| 1
|
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """simple docstring"""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """simple docstring"""
    return round(float((pressure * volume) / (0.0821 * moles)))
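# Illustrative values (function names above are reconstructed from the formula bodies):
#   molarity_to_normality(2, 3.1, 0.31)            -> 20
#   moles_to_pressure(0.82, 3, 300)                -> 90
#   moles_to_volume(0.82, 3, 300)                  -> 90
#   pressure_and_volume_to_temperature(0.82, 1, 2) -> 20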
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
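# step_rules packs "multiplier:step" pairs plus a trailing default multiplier, e.g.
# "1:10,0.1:20,0.01:55,0.005" keeps the multiplier at 1 until step 10, 0.1 until step
# 20, 0.01 until step 55, and 0.005 afterwards.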
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
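# Illustrative usage, assuming `optimizer` is a torch.optim.Optimizer:
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
# and then scheduler.step() is called once per optimization step.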
| 686
| 0
|