code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
def __snake_case ( _UpperCAmelCase : Dict):
UpperCamelCase = '''huggingface/label-files'''
UpperCamelCase = '''imagenet-1k-id2label.json'''
UpperCamelCase = json.load(open(hf_hub_download(_UpperCAmelCase, _UpperCAmelCase, repo_type='''dataset'''), '''r'''))
UpperCamelCase = {int(_UpperCAmelCase): v for k, v in idalabel.items()}
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
UpperCamelCase = BitConfig(
conv_layer=_UpperCAmelCase, num_labels=1000, idalabel=_UpperCAmelCase, labelaid=_UpperCAmelCase, )
return config
def __snake_case ( _UpperCAmelCase : str):
if "stem.conv" in name:
UpperCamelCase = name.replace('''stem.conv''', '''bit.embedder.convolution''')
if "blocks" in name:
UpperCamelCase = name.replace('''blocks''', '''layers''')
if "head.fc" in name:
UpperCamelCase = name.replace('''head.fc''', '''classifier.1''')
if name.startswith('''norm'''):
UpperCamelCase = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
UpperCamelCase = '''bit.encoder.''' + name
return name
def __snake_case ( ):
UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase = Image.open(requests.get(_UpperCAmelCase, stream=_UpperCAmelCase).raw)
return im
@torch.no_grad()
def __snake_case ( _UpperCAmelCase : str, _UpperCAmelCase : Any, _UpperCAmelCase : int=False):
UpperCamelCase = get_config(_UpperCAmelCase)
# load original model from timm
UpperCamelCase = create_model(_UpperCAmelCase, pretrained=_UpperCAmelCase)
timm_model.eval()
# load state_dict of original model
UpperCamelCase = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCamelCase = state_dict.pop(_UpperCAmelCase)
UpperCamelCase = val.squeeze() if '''head''' in key else val
# load HuggingFace model
UpperCamelCase = BitForImageClassification(_UpperCAmelCase)
model.eval()
model.load_state_dict(_UpperCAmelCase)
# create image processor
UpperCamelCase = create_transform(**resolve_data_config({}, model=_UpperCAmelCase))
UpperCamelCase = transform.transforms
UpperCamelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
UpperCamelCase = BitImageProcessor(
do_resize=_UpperCAmelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=_UpperCAmelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=_UpperCAmelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
UpperCamelCase = prepare_img()
UpperCamelCase = transform(_UpperCAmelCase).unsqueeze(0)
UpperCamelCase = processor(_UpperCAmelCase, return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(_UpperCAmelCase, _UpperCAmelCase)
# verify logits
with torch.no_grad():
UpperCamelCase = model(_UpperCAmelCase)
UpperCamelCase = outputs.logits
print('''Logits:''', logits[0, :3])
print('''Predicted class:''', model.config.idalabel[logits.argmax(-1).item()])
UpperCamelCase = timm_model(_UpperCAmelCase)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCAmelCase, outputs.logits, atol=1E-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase).mkdir(exist_ok=_UpperCAmelCase)
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
model.save_pretrained(_UpperCAmelCase)
processor.save_pretrained(_UpperCAmelCase)
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub')
model.push_to_hub(f'ybelkada/{model_name}')
processor.push_to_hub(f'ybelkada/{model_name}')
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
snake_case_ : List[Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 212 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Tuple = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowercase__ ( snake_case_ ):
'''simple docstring'''
_snake_case = '''time_series_transformer'''
_snake_case = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "student_t" , lowerCamelCase__ = "nll" , lowerCamelCase__ = 1 , lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , lowerCamelCase__ = "mean" , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 3_2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = 2 , lowerCamelCase__ = True , lowerCamelCase__ = "gelu" , lowerCamelCase__ = 6_4 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 0.1 , lowerCamelCase__ = 1_0_0 , lowerCamelCase__ = 0.02 , lowerCamelCase__=True , **lowerCamelCase__ , ):
'''simple docstring'''
UpperCamelCase = prediction_length
UpperCamelCase = context_length or prediction_length
UpperCamelCase = distribution_output
UpperCamelCase = loss
UpperCamelCase = input_size
UpperCamelCase = num_time_features
UpperCamelCase = lags_sequence
UpperCamelCase = scaling
UpperCamelCase = num_dynamic_real_features
UpperCamelCase = num_static_real_features
UpperCamelCase = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase = cardinality
else:
UpperCamelCase = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
UpperCamelCase = embedding_dimension
else:
UpperCamelCase = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase = input_size * len(lowerCamelCase__ ) + self._number_of_features
UpperCamelCase = d_model
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_attention_heads
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = decoder_layers
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = use_cache
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 212 | 1 |
def __UpperCAmelCase ( __a : str ) -> int:
"""simple docstring"""
_a : Any = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_a : Any = set()
return any(
node not in visited and depth_first_search(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
for node in graph )
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[Any] ,__a : int ,__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
visited.add(_lowerCAmelCase )
rec_stk.add(_lowerCAmelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_lowerCAmelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
a__ = TypeVar('''KEY''')
a__ = TypeVar('''VAL''')
@dataclass(frozen=__lowercase , slots=__lowercase )
class UpperCAmelCase_ ( Generic[KEY, VAL] ):
"""simple docstring"""
UpperCAmelCase__ : KEY
UpperCAmelCase__ : VAL
class UpperCAmelCase_ ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
super().__init__(_a , _a )
def __bool__( self ) -> bool:
return False
a__ = _DeletedItem()
class UpperCAmelCase_ ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , _a = 8 , _a = 0.75 ) -> None:
_a : Optional[Any] = initial_block_size
_a : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_a : Tuple = capacity_factor
_a : Optional[Any] = 0
def __lowercase ( self , _a ) -> int:
return hash(_a ) % len(self._buckets )
def __lowercase ( self , _a ) -> int:
return (ind + 1) % len(self._buckets )
def __lowercase ( self , _a , _a , _a ) -> bool:
_a : Optional[Any] = self._buckets[ind]
if not stored:
_a : List[Any] = _Item(_a , _a )
self._len += 1
return True
elif stored.key == key:
_a : int = _Item(_a , _a )
return True
else:
return False
def __lowercase ( self ) -> bool:
_a : List[Any] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_a )
def __lowercase ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_a : Union[str, Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __lowercase ( self , _a ) -> None:
_a : Any = self._buckets
_a : str = [None] * new_size
_a : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __lowercase ( self , _a ) -> Iterator[int]:
_a : str = self._get_bucket_index(_a )
for _ in range(len(self._buckets ) ):
yield ind
_a : List[Any] = self._get_next_ind(_a )
def __lowercase ( self , _a , _a ) -> None:
for ind in self._iterate_buckets(_a ):
if self._try_set(_a , _a , _a ):
break
def __setitem__( self , _a , _a ) -> None:
if self._is_full():
self._size_up()
self._add_item(_a , _a )
def __delitem__( self , _a ) -> None:
for ind in self._iterate_buckets(_a ):
_a : List[str] = self._buckets[ind]
if item is None:
raise KeyError(_a )
if item is _deleted:
continue
if item.key == key:
_a : Optional[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _a ) -> VAL:
for ind in self._iterate_buckets(_a ):
_a : int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_a )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
_a : int = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
| 578 | 0 |
'''simple docstring'''
from math import sqrt
def __snake_case ( UpperCAmelCase_ : int ):
lowerCamelCase_ = 0
for i in range(1 , int(sqrt(UpperCAmelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCAmelCase_ ):
total += i + n // i
elif i == sqrt(UpperCAmelCase_ ):
total += i
return total - n
def __snake_case ( UpperCAmelCase_ : int = 10000 ):
lowerCamelCase_ = sum(
i
for i in range(1 , UpperCAmelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCAmelCase_ ) ) == i and sum_of_divisors(UpperCAmelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 675 |
'''simple docstring'''
def __snake_case ( ):
lowerCamelCase_ = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
lowerCamelCase_ = 6
lowerCamelCase_ = 1
lowerCamelCase_ = 1901
lowerCamelCase_ = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
lowerCamelCase_ = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
lowerCamelCase_ = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
lowerCamelCase_ = day - days_per_month[month - 2]
if month > 12:
year += 1
lowerCamelCase_ = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 675 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
snake_case__ : str =TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
snake_case__ : Any =AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case__ : Any =tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
snake_case__ : Optional[int] =tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
snake_case__ : Optional[Any] =model(UpperCAmelCase_ , labels=UpperCAmelCase_ ).loss
snake_case__ : Optional[int] =-tf.math.reduce_mean(UpperCAmelCase_ ).numpy()
snake_case__ : List[str] =-21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 711 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowercase ( unittest.TestCase ):
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ):
snake_case__ : Any =parent
snake_case__ : Dict =batch_size
snake_case__ : List[Any] =seq_length
snake_case__ : str =is_training
snake_case__ : Union[str, Any] =use_attention_mask
snake_case__ : str =use_token_type_ids
snake_case__ : int =use_labels
snake_case__ : Tuple =vocab_size
snake_case__ : List[str] =hidden_size
snake_case__ : Dict =num_hidden_layers
snake_case__ : Optional[Any] =num_attention_heads
snake_case__ : List[str] =intermediate_size
snake_case__ : str =hidden_act
snake_case__ : Union[str, Any] =hidden_dropout_prob
snake_case__ : Tuple =attention_probs_dropout_prob
snake_case__ : Tuple =max_position_embeddings
snake_case__ : str =type_vocab_size
snake_case__ : Optional[Any] =type_sequence_label_size
snake_case__ : str =initializer_range
snake_case__ : List[Any] =num_choices
def lowercase__ ( self ):
snake_case__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple =None
if self.use_attention_mask:
snake_case__ : Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] =None
if self.use_token_type_ids:
snake_case__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Tuple =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ):
snake_case__ : Optional[int] =self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] =config_and_inputs
snake_case__ : Dict ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowercase ( _A , unittest.TestCase ):
_a : str = True
_a : Optional[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ):
snake_case__ : Optional[Any] =FlaxRoFormerModelTester(self )
@slow
def lowercase__ ( self ):
for model_class_name in self.all_model_classes:
snake_case__ : Tuple =model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=a )
snake_case__ : List[Any] =model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class _lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
snake_case__ : Optional[int] =FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case__ : Tuple =jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str =model(a )[0]
snake_case__ : List[str] =5_0_0_0_0
snake_case__ : str =(1, 6, vocab_size)
self.assertEqual(output.shape , a )
snake_case__ : Optional[int] =jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
| 448 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCamelCase = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 146 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """philschmid/bart-large-cnn-samsum"""
__SCREAMING_SNAKE_CASE = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
__SCREAMING_SNAKE_CASE = """summarizer"""
__SCREAMING_SNAKE_CASE = AutoTokenizer
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM
__SCREAMING_SNAKE_CASE = ["""text"""]
__SCREAMING_SNAKE_CASE = ["""text"""]
def UpperCamelCase__ (self , __a ) -> int:
"""simple docstring"""
return self.pre_processor(__a , return_tensors='pt' , truncation=__a )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
return self.model.generate(**__a )[0]
def UpperCamelCase__ (self , __a ) -> Optional[Any]:
"""simple docstring"""
return self.pre_processor.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
| 146 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a (lowerCamelCase ):
__a : Dict = "wav2vec2"
def __init__( self : int , __magic_name__ : List[str]=32 , __magic_name__ : int=7_68 , __magic_name__ : Tuple=12 , __magic_name__ : Tuple=12 , __magic_name__ : Any=30_72 , __magic_name__ : List[Any]="gelu" , __magic_name__ : int=0.1 , __magic_name__ : str=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Any=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Union[str, Any]=0.0_2 , __magic_name__ : List[str]=1E-5 , __magic_name__ : Any="group" , __magic_name__ : Dict="gelu" , __magic_name__ : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __magic_name__ : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=1_28 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Dict=False , __magic_name__ : List[Any]=True , __magic_name__ : Union[str, Any]=0.0_5 , __magic_name__ : List[str]=10 , __magic_name__ : int=2 , __magic_name__ : Any=0.0 , __magic_name__ : Optional[int]=10 , __magic_name__ : Optional[int]=0 , __magic_name__ : List[str]=3_20 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Dict=0.1 , __magic_name__ : Any=1_00 , __magic_name__ : List[str]=2_56 , __magic_name__ : Dict=2_56 , __magic_name__ : Tuple=0.1 , __magic_name__ : List[str]="sum" , __magic_name__ : List[str]=False , __magic_name__ : Union[str, Any]=False , __magic_name__ : Tuple=2_56 , __magic_name__ : List[str]=(5_12, 5_12, 5_12, 5_12, 15_00) , __magic_name__ : Union[str, Any]=(5, 3, 3, 1, 1) , __magic_name__ : Tuple=(1, 2, 3, 1, 1) , __magic_name__ : Union[str, Any]=5_12 , __magic_name__ : int=0 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Tuple=2 , __magic_name__ : str=False , __magic_name__ : List[Any]=3 , __magic_name__ : Tuple=2 , __magic_name__ : int=3 , __magic_name__ : Tuple=None , __magic_name__ : Union[str, Any]=None , **__magic_name__ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : int = feat_extract_norm
UpperCAmelCase_ : Any = feat_extract_activation
UpperCAmelCase_ : List[str] = list(__magic_name__ )
UpperCAmelCase_ : str = list(__magic_name__ )
UpperCAmelCase_ : List[Any] = list(__magic_name__ )
UpperCAmelCase_ : List[str] = conv_bias
UpperCAmelCase_ : Dict = num_conv_pos_embeddings
UpperCAmelCase_ : List[str] = num_conv_pos_embedding_groups
UpperCAmelCase_ : Any = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_dropout
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : str = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : Union[str, Any] = final_dropout
UpperCAmelCase_ : int = layerdrop
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : List[str] = do_stable_layer_norm
UpperCAmelCase_ : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : str = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : List[Any] = mask_time_length
UpperCAmelCase_ : Dict = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : Tuple = mask_feature_length
UpperCAmelCase_ : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : List[Any] = num_codevectors_per_group
UpperCAmelCase_ : List[Any] = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : Optional[int] = feat_quantizer_dropout
UpperCAmelCase_ : List[Any] = num_negatives
UpperCAmelCase_ : Optional[int] = codevector_dim
UpperCAmelCase_ : Optional[int] = proj_codevector_dim
UpperCAmelCase_ : Tuple = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : Optional[Any] = num_adapter_layers
UpperCAmelCase_ : Union[str, Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Any = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Tuple = list(__magic_name__ )
UpperCAmelCase_ : List[Any] = list(__magic_name__ )
UpperCAmelCase_ : Dict = list(__magic_name__ )
UpperCAmelCase_ : int = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 644 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = get_activation('''swish''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
self.assertIsInstance(__magic_name__ , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
self.assertIsInstance(__magic_name__ , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
self.assertIsInstance(__magic_name__ , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 644 | 1 |
'''simple docstring'''
lowerCAmelCase : Optional[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def A_( A : dict , A : str , A : Optional[Any]):
UpperCamelCase = set()
# keep track of all the paths to be checked
UpperCamelCase = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCamelCase = queue.pop(0)
# get the last node from the path
UpperCamelCase = path[-1]
if node not in explored:
UpperCamelCase = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCamelCase = list(A)
new_path.append(A)
queue.append(A)
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A)
# in case there's no path between the 2 nodes
return []
def A_( A : dict , A : str , A : Tuple):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCamelCase = [start]
UpperCamelCase = set(A)
# Keep tab on distances from `start` node.
UpperCamelCase = {start: 0, target: -1}
while queue:
UpperCamelCase = queue.pop(0)
if node == target:
UpperCamelCase = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node])
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A)
queue.append(A)
UpperCamelCase = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = 0
lowerCAmelCase_ = False
lowerCAmelCase_ = 3.0
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=A_ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , A_ )
@require_multi_gpu
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(A_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase : Tuple = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
lowerCAmelCase : List[Any] = torch.nn.Linear(1_00, 2_00)
lowerCAmelCase : int = accelerator.prepare(model)
# Check the values changed in kwargs
lowerCAmelCase : Dict = ''
lowerCAmelCase : Dict = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 3 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def a__ ( ):
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def a__ ( lowerCAmelCase__ ):
print("Generating prime p..." )
UpperCAmelCase_ = rabinMiller.generate_large_prime(lowerCAmelCase__ )
print("Generating prime q..." )
UpperCAmelCase_ = rabinMiller.generate_large_prime(lowerCAmelCase__ )
UpperCAmelCase_ = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
UpperCAmelCase_ = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(lowerCAmelCase__ , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
UpperCAmelCase_ = cryptoMath.find_mod_inverse(lowerCAmelCase__ , (p - 1) * (q - 1) )
UpperCAmelCase_ = (n, e)
UpperCAmelCase_ = (n, d)
return (public_key, private_key)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("\nWARNING:" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"Use a different name or delete these files and re-run this program." )
sys.exit()
UpperCAmelCase_ , UpperCAmelCase_ = generate_key(lowerCAmelCase__ )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , "w" ) as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
| 14 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCAmelCase_ = MaskFormerConfig(backbone_config=lowerCAmelCase__ )
UpperCAmelCase_ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCAmelCase_ = 847
UpperCAmelCase_ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCAmelCase_ = 150
UpperCAmelCase_ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCAmelCase_ = 171
UpperCAmelCase_ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCAmelCase_ = 133
UpperCAmelCase_ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCAmelCase_ = 19
UpperCAmelCase_ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCAmelCase_ = 65
UpperCAmelCase_ = "mapillary-vistas-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# fmt: off
UpperCAmelCase_ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[: hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:config.hidden_size]
UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size :, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size :]
# fmt: on
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False ):
UpperCAmelCase_ = get_maskformer_config(lowerCAmelCase__ )
# load original state_dict
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase_ = pickle.load(lowerCAmelCase__ )
UpperCAmelCase_ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# update to torch tensors
for key, value in state_dict.items():
UpperCAmelCase_ = torch.from_numpy(lowerCAmelCase__ )
# load 🤗 model
UpperCAmelCase_ = MaskFormerForInstanceSegmentation(lowerCAmelCase__ )
model.eval()
for name, param in model.named_parameters():
print(lowerCAmelCase__ , param.shape )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCAmelCase__ ) == 0, f"""Unexpected keys: {unexpected_keys}"""
# verify results
UpperCAmelCase_ = prepare_img()
if "vistas" in model_name:
UpperCAmelCase_ = 65
elif "cityscapes" in model_name:
UpperCAmelCase_ = 65535
else:
UpperCAmelCase_ = 255
UpperCAmelCase_ = True if "ade" in model_name else False
UpperCAmelCase_ = MaskFormerImageProcessor(ignore_index=lowerCAmelCase__ , reduce_labels=lowerCAmelCase__ )
UpperCAmelCase_ = image_processor(lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase_ = model(**lowerCAmelCase__ )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCAmelCase_ = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
image_processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(f"""nielsr/{model_name}""" )
image_processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = 8, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_pad
lowerCamelCase_ =pad_size
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =get_image_size(lowerCAmelCase )
lowerCamelCase_ =(old_height // size + 1) * size - old_height
lowerCamelCase_ =(old_width // size + 1) * size - old_width
return pad(lowerCAmelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_pad if do_pad is not None else self.do_pad
lowerCamelCase_ =pad_size if pad_size is not None else self.pad_size
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_pad:
lowerCamelCase_ =[self.pad(lowerCAmelCase, size=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
| 676 |
'''simple docstring'''
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =F'''Input value of [number={number}] must be an integer'''
raise TypeError(__snake_case )
if number < 0:
return False
lowerCamelCase_ =number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : Any = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
A__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class snake_case__ :
def __init__( self : str , __a : Dict , __a : Optional[Any]=13 , __a : Tuple=7 , __a : Any=True , __a : int=True , __a : List[str]=True , __a : Any=True , __a : Optional[Any]=99 , __a : Any=64 , __a : str=5 , __a : List[Any]=4 , __a : Optional[Any]=37 , __a : List[Any]="gelu" , __a : Optional[Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[str]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Tuple=0.0_2 , __a : List[Any]=3 , __a : Dict=4 , __a : Dict=None , ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = parent
__snake_case : Union[str, Any] = batch_size
__snake_case : Tuple = seq_length
__snake_case : Union[str, Any] = is_training
__snake_case : Dict = use_input_mask
__snake_case : Dict = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : Optional[int] = vocab_size
__snake_case : int = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : List[Any] = max_position_embeddings
__snake_case : Dict = type_vocab_size
__snake_case : str = type_sequence_label_size
__snake_case : Optional[int] = initializer_range
__snake_case : Optional[int] = num_labels
__snake_case : Dict = num_choices
__snake_case : int = scope
__snake_case : List[str] = vocab_size - 1
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] = None
if self.use_input_mask:
__snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Union[str, Any] = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def A_ ( self : Dict ) -> int:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def A_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
__snake_case : Optional[Any] = True
return config, input_ids, input_mask, token_labels
def A_ ( self : Union[str, Any] , __a : Optional[int] , __a : Any , __a : int ) -> Any:
'''simple docstring'''
__snake_case : int = GPTNeoXModel(config=__a )
model.to(__a )
model.eval()
__snake_case : List[Any] = model(__a , attention_mask=__a )
__snake_case : List[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Optional[Any] , __a : Union[str, Any] , __a : List[str] , __a : int ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[Any] = True
__snake_case : Tuple = GPTNeoXModel(__a )
model.to(__a )
model.eval()
__snake_case : Optional[int] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Dict , __a : Optional[int] , __a : Dict , __a : Optional[Any] , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[int] = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
__snake_case : str = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Union[str, Any] , __a : List[str] , __a : Optional[int] , __a : Optional[int] , __a : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : List[str] = GPTNeoXForQuestionAnswering(__a )
model.to(__a )
model.eval()
__snake_case : List[Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Any , __a : Optional[int] , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Any = GPTNeoXForSequenceClassification(__a )
model.to(__a )
model.eval()
__snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Tuple = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : List[Any] , __a : str , __a : List[str] , __a : Dict , __a : List[Any] ) -> int:
'''simple docstring'''
__snake_case : Optional[Any] = self.num_labels
__snake_case : List[Any] = GPTNeoXForTokenClassification(__a )
model.to(__a )
model.eval()
__snake_case : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : int , __a : List[str] , __a : str , __a : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Any = True
__snake_case : List[Any] = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__snake_case : Dict = model(__a , attention_mask=__a , use_cache=__a )
__snake_case : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Tuple = model(__a , attention_mask=__a , output_hidden_states=__a )
__snake_case : str = output_from_no_past['hidden_states'][0]
__snake_case : Optional[int] = model(
__a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0]
# select random slice
__snake_case : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : int = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def A_ ( self : List[str] ) -> Dict:
'''simple docstring'''
__snake_case : Dict = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
A__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
A__ = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
def A_ ( self : Any ) -> List[str]:
'''simple docstring'''
__snake_case : Tuple = GPTNeoXModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=__a , hidden_size=64 , num_attention_heads=8 )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : str ) -> int:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def A_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
__snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : Dict = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
def A_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def A_ ( self : List[str] ) -> int:
'''simple docstring'''
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def A_ ( self : str ) -> Any:
'''simple docstring'''
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def A_ ( self : Union[str, Any] , __a : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : List[str] = GPTNeoXModel(__a )
original_model.to(__a )
original_model.eval()
__snake_case : Optional[Any] = original_model(__a ).last_hidden_state
__snake_case : str = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : int = {'type': scaling_type, 'factor': 1_0.0}
__snake_case : Optional[Any] = GPTNeoXModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__snake_case : Union[str, Any] = scaled_model(__a ).last_hidden_state
__snake_case : Any = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1e-5 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : List[str] = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
__snake_case : Optional[Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__a )
__snake_case : List[str] = tokenizer('My favorite food is' , return_tensors='pt' ).to(__a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__snake_case : Dict = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
__snake_case : List[Any] = model.generate(**__a , do_sample=__a , max_new_tokens=20 )
__snake_case : str = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
| 124 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[str] = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCamelCase (a_ ):
snake_case_ = """pegasus"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __UpperCamelCase=5_0_2_6_5 , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=1_2 , __UpperCamelCase=4_0_9_6 , __UpperCamelCase=1_6 , __UpperCamelCase=1_2 , __UpperCamelCase=4_0_9_6 , __UpperCamelCase=1_6 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="gelu" , __UpperCamelCase=1_0_2_4 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=0 , __UpperCamelCase=False , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=1 , **__UpperCamelCase , )-> Tuple:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
@property
def __UpperCAmelCase ( self )-> int:
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self )-> int:
return self.d_model
| 367 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = '''swinv2'''
snake_case_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , A__=224 , A__=4 , A__=3 , A__=96 , A__=[2, 2, 6, 2] , A__=[3, 6, 12, 24] , A__=7 , A__=4.0 , A__=True , A__=0.0 , A__=0.0 , A__=0.1 , A__="gelu" , A__=False , A__=0.02 , A__=1E-5 , A__=32 , **A__ , ):
"""simple docstring"""
super().__init__(**A__ )
UpperCAmelCase_: Optional[Any] = image_size
UpperCAmelCase_: Tuple = patch_size
UpperCAmelCase_: str = num_channels
UpperCAmelCase_: str = embed_dim
UpperCAmelCase_: str = depths
UpperCAmelCase_: Dict = len(A__ )
UpperCAmelCase_: Optional[Any] = num_heads
UpperCAmelCase_: Tuple = window_size
UpperCAmelCase_: List[str] = mlp_ratio
UpperCAmelCase_: Optional[int] = qkv_bias
UpperCAmelCase_: Tuple = hidden_dropout_prob
UpperCAmelCase_: str = attention_probs_dropout_prob
UpperCAmelCase_: Dict = drop_path_rate
UpperCAmelCase_: Dict = hidden_act
UpperCAmelCase_: Optional[Any] = use_absolute_embeddings
UpperCAmelCase_: Optional[int] = layer_norm_eps
UpperCAmelCase_: Tuple = initializer_range
UpperCAmelCase_: List[str] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_: Any = int(embed_dim * 2 ** (len(A__ ) - 1) )
UpperCAmelCase_: List[str] = (0, 0, 0, 0) | 306 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase__ ( snake_case__ , snake_case__ ):
snake_case_ = '''swin'''
snake_case_ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , A__=224 , A__=4 , A__=3 , A__=96 , A__=[2, 2, 6, 2] , A__=[3, 6, 12, 24] , A__=7 , A__=4.0 , A__=True , A__=0.0 , A__=0.0 , A__=0.1 , A__="gelu" , A__=False , A__=0.02 , A__=1E-5 , A__=32 , A__=None , A__=None , **A__ , ):
"""simple docstring"""
super().__init__(**A__ )
UpperCAmelCase_: Any = image_size
UpperCAmelCase_: Optional[int] = patch_size
UpperCAmelCase_: Optional[int] = num_channels
UpperCAmelCase_: Optional[Any] = embed_dim
UpperCAmelCase_: Union[str, Any] = depths
UpperCAmelCase_: List[Any] = len(A__ )
UpperCAmelCase_: Union[str, Any] = num_heads
UpperCAmelCase_: Dict = window_size
UpperCAmelCase_: int = mlp_ratio
UpperCAmelCase_: int = qkv_bias
UpperCAmelCase_: Dict = hidden_dropout_prob
UpperCAmelCase_: Any = attention_probs_dropout_prob
UpperCAmelCase_: int = drop_path_rate
UpperCAmelCase_: Dict = hidden_act
UpperCAmelCase_: Union[str, Any] = use_absolute_embeddings
UpperCAmelCase_: Any = layer_norm_eps
UpperCAmelCase_: Tuple = initializer_range
UpperCAmelCase_: Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_: Optional[Any] = int(embed_dim * 2 ** (len(A__ ) - 1) )
UpperCAmelCase_: int = ["stem"] + [F"stage{idx}" for idx in range(1 , len(A__ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_: List[Any] = get_aligned_output_features_output_indices(
out_features=A__ , out_indices=A__ , stage_names=self.stage_names )
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = version.parse('''1.11''' )
@property
def snake_case_ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case_ ( self ):
"""simple docstring"""
return 1E-4 | 306 | 1 |
'''simple docstring'''
def snake_case_ (UpperCamelCase : str ):
'''simple docstring'''
_a = [int(UpperCamelCase ) for i in ip_va_address.split('''.''' ) if i.isdigit()]
return len(UpperCamelCase ) == 4 and all(0 <= int(UpperCamelCase ) <= 254 for octet in octets )
if __name__ == "__main__":
_snake_case : Optional[Any] = input().strip()
_snake_case : Any = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 22 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = 'mctct'
def __init__( self , a__=8065 , a__=1536 , a__=36 , a__=6144 , a__=4 , a__=384 , a__=920 , a__=1e-5 , a__=0.3 , a__="relu" , a__=0.02 , a__=0.3 , a__=0.3 , a__=1 , a__=0 , a__=2 , a__=1 , a__=0.3 , a__=1 , a__=(7,) , a__=(3,) , a__=80 , a__=1 , a__=None , a__="sum" , a__=False , **a__ , ) -> Optional[Any]:
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = intermediate_size
A = num_attention_heads
A = attention_head_dim
A = max_position_embeddings
A = layer_norm_eps
A = layerdrop
A = hidden_act
A = initializer_range
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = pad_token_id
A = bos_token_id
A = eos_token_id
A = conv_glu_dim
A = conv_dropout
A = num_conv_layers
A = input_feat_per_channel
A = input_channels
A = conv_channels
A = ctc_loss_reduction
A = ctc_zero_infinity
# prevents config testing fail with exporting to json
A = list(a__ )
A = list(a__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '
f'`config.num_conv_layers = {self.num_conv_layers}`.' )
| 641 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Dict = logging.get_logger(__name__)
def _snake_case ( _snake_case : Optional[Any] ):
lowerCAmelCase : Tuple = '''huggingface/label-files'''
lowerCAmelCase : List[str] = '''imagenet-1k-id2label.json'''
lowerCAmelCase : List[str] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase : List[Any] = {int(_snake_case ): v for k, v in idalabel.items()}
lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
lowerCAmelCase : Any = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase : Tuple = BitConfig(
conv_layer=_snake_case , num_labels=1000 , idalabel=_snake_case , labelaid=_snake_case , )
return config
def _snake_case ( _snake_case : Dict ):
if "stem.conv" in name:
lowerCAmelCase : Union[str, Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowerCAmelCase : Any = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
lowerCAmelCase : Optional[int] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
lowerCAmelCase : Dict = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase : Optional[Any] = '''bit.encoder.''' + name
return name
def _snake_case ( ):
lowerCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase : Optional[Any] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return im
@torch.no_grad()
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Optional[int]=False ):
lowerCAmelCase : Any = get_config(_snake_case )
# load original model from timm
lowerCAmelCase : Dict = create_model(_snake_case , pretrained=_snake_case )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase : Any = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase : Optional[Any] = state_dict.pop(_snake_case )
lowerCAmelCase : Dict = val.squeeze() if '''head''' in key else val
# load HuggingFace model
lowerCAmelCase : List[str] = BitForImageClassification(_snake_case )
model.eval()
model.load_state_dict(_snake_case )
# create image processor
lowerCAmelCase : Optional[int] = create_transform(**resolve_data_config({} , model=_snake_case ) )
lowerCAmelCase : Optional[Any] = transform.transforms
lowerCAmelCase : Optional[int] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowerCAmelCase : Optional[int] = BitImageProcessor(
do_resize=_snake_case , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_snake_case , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase : int = prepare_img()
lowerCAmelCase : str = transform(_snake_case ).unsqueeze(0 )
lowerCAmelCase : List[Any] = processor(_snake_case , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_snake_case , _snake_case )
# verify logits
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(_snake_case )
lowerCAmelCase : Optional[int] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase : Union[str, Any] = timm_model(_snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_snake_case , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_snake_case ).mkdir(exist_ok=_snake_case )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
snake_case__ : Optional[Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 637 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case_( a__ ):
__UpperCamelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCamelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCamelCase = '''summarizer'''
__UpperCamelCase = AutoTokenizer
__UpperCamelCase = AutoModelForSeqaSeqLM
__UpperCamelCase = ['''text''']
__UpperCamelCase = ['''text''']
def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ):
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ):
return self.model.generate(**UpperCamelCase_ )[0]
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ):
return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
| 637 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def a__ ( _UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' ,_UpperCamelCase ,)
if isinstance(_UpperCamelCase ,torch.Tensor ):
return image
elif isinstance(_UpperCamelCase ,PIL.Image.Image ):
__lowerCamelCase = [image]
if isinstance(image[0] ,PIL.Image.Image ):
__lowerCamelCase ,__lowerCamelCase = image[0].size
__lowerCamelCase ,__lowerCamelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
__lowerCamelCase = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__lowerCamelCase = np.concatenate(_UpperCamelCase ,axis=0 )
__lowerCamelCase = np.array(_UpperCamelCase ).astype(np.floataa ) / 255.0
__lowerCamelCase = image.transpose(0 ,3 ,1 ,2 )
__lowerCamelCase = 2.0 * image - 1.0
__lowerCamelCase = torch.from_numpy(_UpperCamelCase )
elif isinstance(image[0] ,torch.Tensor ):
__lowerCamelCase = torch.cat(_UpperCamelCase ,dim=0 )
return image
def a__ ( _UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(_UpperCamelCase ,torch.Tensor ):
return mask
elif isinstance(_UpperCamelCase ,PIL.Image.Image ):
__lowerCamelCase = [mask]
if isinstance(mask[0] ,PIL.Image.Image ):
__lowerCamelCase ,__lowerCamelCase = mask[0].size
__lowerCamelCase ,__lowerCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCamelCase = [np.array(m.convert('''L''' ).resize((w, h) ,resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
__lowerCamelCase = np.concatenate(_UpperCamelCase ,axis=0 )
__lowerCamelCase = mask.astype(np.floataa ) / 255.0
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = torch.from_numpy(_UpperCamelCase )
elif isinstance(mask[0] ,torch.Tensor ):
__lowerCamelCase = torch.cat(_UpperCamelCase ,dim=0 )
return mask
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 250 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 10 , __UpperCAmelCase = 10 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ):
'''simple docstring'''
__lowerCamelCase = image
__lowerCamelCase = _preprocess_image(__UpperCAmelCase )
__lowerCamelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCamelCase = _preprocess_mask(__UpperCAmelCase )
__lowerCamelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
__lowerCamelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__lowerCamelCase = original_image.shape
__lowerCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.device )
__lowerCamelCase = eta
__lowerCamelCase = self.scheduler.timesteps[0] + 1
__lowerCamelCase = generator[0] if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
__lowerCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# compute previous image: x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
__lowerCamelCase = self.scheduler.undo_step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = t
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 175 |
import numpy
# List of input, output pairs
a_ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a_ = (((515, 22, 13), 555), ((61, 35, 49), 150))
a_ = [2, 4, 1, 5]
a_ = len(train_data)
a_ = 0.0_09
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any]="train" ):
return calculate_hypothesis_value(_UpperCamelCase ,_UpperCamelCase ) - output(
_UpperCamelCase ,_UpperCamelCase )
def a__ ( _UpperCamelCase : Optional[Any] ):
__lowerCamelCase = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : List[Any] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Dict=m ):
__lowerCamelCase = 0
for i in range(_UpperCamelCase ):
if index == -1:
summation_value += _error(_UpperCamelCase )
else:
summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
return summation_value
def a__ ( _UpperCamelCase : Optional[int] ):
__lowerCamelCase = summation_of_cost_derivative(_UpperCamelCase ,_UpperCamelCase ) / m
return cost_derivative_value
def a__ ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowerCamelCase = 0.000_002
__lowerCamelCase = 0
__lowerCamelCase = 0
while True:
j += 1
__lowerCamelCase = [0, 0, 0, 0]
for i in range(0 ,len(_UpperCamelCase ) ):
__lowerCamelCase = get_cost_derivative(i - 1 )
__lowerCamelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCamelCase ,_UpperCamelCase ,atol=_UpperCamelCase ,rtol=_UpperCamelCase ,):
break
__lowerCamelCase = temp_parameter_vector
print(('''Number of iterations:''', j) )
def a__ ( ):
for i in range(len(_UpperCamelCase ) ):
print(('''Actual output value:''', output(_UpperCamelCase ,'''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(_UpperCamelCase ,'''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 175 | 1 |
'''simple docstring'''
def __lowercase (_lowercase, _lowercase ) -> int:
"""simple docstring"""
while second != 0:
__lowerCamelCase : int = first & second
first ^= second
__lowerCamelCase : Optional[Any] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ :List[str] = int(input("""Enter the first number: """).strip())
UpperCAmelCase__ :Optional[int] = int(input("""Enter the second number: """).strip())
print(f'''{add(first, second) = }''')
| 483 |
'''simple docstring'''
import math
def __lowercase () -> None:
"""simple docstring"""
__lowerCamelCase : Optional[int] = input("""Enter message: """ )
__lowerCamelCase : Optional[Any] = int(input(f"Enter key [2-{len(_lowercase ) - 1}]: " ) )
__lowerCamelCase : Dict = input("""Encryption/Decryption [e/d]: """ )
if mode.lower().startswith("""e""" ):
__lowerCamelCase : Any = encrypt_message(_lowercase, _lowercase )
elif mode.lower().startswith("""d""" ):
__lowerCamelCase : int = decrypt_message(_lowercase, _lowercase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f"Output:\n{text + '|'}" )
def __lowercase (_lowercase, _lowercase ) -> str:
"""simple docstring"""
__lowerCamelCase : int = [""""""] * key
for col in range(_lowercase ):
__lowerCamelCase : Optional[Any] = col
while pointer < len(_lowercase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(_lowercase )
def __lowercase (_lowercase, _lowercase ) -> str:
"""simple docstring"""
__lowerCamelCase : List[str] = math.ceil(len(_lowercase ) / key )
__lowerCamelCase : List[str] = key
__lowerCamelCase : Tuple = (num_cols * num_rows) - len(_lowercase )
__lowerCamelCase : List[str] = [""""""] * num_cols
__lowerCamelCase : List[str] = 0
__lowerCamelCase : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
__lowerCamelCase : int = 0
row += 1
return "".join(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 483 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[int] ={
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any =['MobileNetV2FeatureExtractor']
_lowercase : Dict =['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int =[
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowercase : Tuple =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136 |
from math import ceil
def __a ( SCREAMING_SNAKE_CASE = 1_0_0_1 ) -> int:
'''simple docstring'''
__UpperCAmelCase = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
__UpperCAmelCase = 2 * i + 1
__UpperCAmelCase = 2 * i
__UpperCAmelCase = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
A_ : Dict = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 303 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = parent
UpperCAmelCase : str = 13
UpperCAmelCase : int = 7
UpperCAmelCase : int = True
UpperCAmelCase : Any = True
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[Any] = True
UpperCAmelCase : List[str] = True
UpperCAmelCase : List[Any] = False
UpperCAmelCase : str = False
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 99
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : int = 32
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : Any = 0.1
UpperCAmelCase : Dict = 0.1
UpperCAmelCase : str = 512
UpperCAmelCase : str = 16
UpperCAmelCase : Optional[Any] = 2
UpperCAmelCase : Any = 0.02
UpperCAmelCase : List[str] = 3
UpperCAmelCase : str = 4
UpperCAmelCase : Optional[int] = """last"""
UpperCAmelCase : Tuple = True
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Any = 0
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCAmelCase : Union[str, Any] = None
if self.use_input_lengths:
UpperCAmelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Union[str, Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = [input_ids, input_mask]
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Dict = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[str] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : Tuple = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Tuple = self.num_choices
UpperCAmelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Optional[int] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Tuple = config_and_inputs
UpperCAmelCase : str = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Any = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCAmelCase : Tuple = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__lowerCAmelCase : Union[str, Any] = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCAmelCase : str = False
__lowerCAmelCase : Dict = False
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = TFFlaubertModelTester(self )
UpperCAmelCase : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : str = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCAmelCase : Optional[int] = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : str = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
UpperCAmelCase : int = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 359 |
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _snake_case ( UpperCamelCase : bytes , UpperCamelCase : int ):
UpperCAmelCase : Tuple = F"{sampling_rate}"
UpperCAmelCase : Optional[int] = """1"""
UpperCAmelCase : Dict = """f32le"""
UpperCAmelCase : Optional[int] = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase : str = ffmpeg_process.communicate(UpperCamelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
UpperCAmelCase : Tuple = output_stream[0]
UpperCAmelCase : Union[str, Any] = np.frombuffer(UpperCamelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _snake_case ( UpperCamelCase : int , UpperCamelCase : float , UpperCamelCase : str = "f32le" , ):
UpperCAmelCase : Tuple = F"{sampling_rate}"
UpperCAmelCase : List[str] = """1"""
if format_for_conversion == "s16le":
UpperCAmelCase : int = 2
elif format_for_conversion == "f32le":
UpperCAmelCase : int = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
UpperCAmelCase : Optional[Any] = platform.system()
if system == "Linux":
UpperCAmelCase : Tuple = """alsa"""
UpperCAmelCase : Any = """default"""
elif system == "Darwin":
UpperCAmelCase : Any = """avfoundation"""
UpperCAmelCase : int = """:0"""
elif system == "Windows":
UpperCAmelCase : Optional[int] = """dshow"""
UpperCAmelCase : str = """default"""
UpperCAmelCase : Dict = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
UpperCAmelCase : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase : Union[str, Any] = _ffmpeg_stream(UpperCamelCase , UpperCamelCase )
for item in iterator:
yield item
def _snake_case ( UpperCamelCase : int , UpperCamelCase : float , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Union[Tuple[float, float], float]] = None , UpperCamelCase : str = "f32le" , ):
if stream_chunk_s is not None:
UpperCAmelCase : List[Any] = stream_chunk_s
else:
UpperCAmelCase : List[Any] = chunk_length_s
UpperCAmelCase : Union[str, Any] = ffmpeg_microphone(UpperCamelCase , UpperCamelCase , format_for_conversion=UpperCamelCase )
if format_for_conversion == "s16le":
UpperCAmelCase : Dict = np.intaa
UpperCAmelCase : Optional[Any] = 2
elif format_for_conversion == "f32le":
UpperCAmelCase : Any = np.floataa
UpperCAmelCase : Dict = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
UpperCAmelCase : Tuple = chunk_length_s / 6
UpperCAmelCase : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(UpperCamelCase , (int, float) ):
UpperCAmelCase : str = [stride_length_s, stride_length_s]
UpperCAmelCase : List[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase : List[Any] = datetime.datetime.now()
UpperCAmelCase : Tuple = datetime.timedelta(seconds=UpperCamelCase )
for item in chunk_bytes_iter(UpperCamelCase , UpperCamelCase , stride=(stride_left, stride_right) , stream=UpperCamelCase ):
# Put everything back in numpy scale
UpperCAmelCase : Dict = np.frombuffer(item["""raw"""] , dtype=UpperCamelCase )
UpperCAmelCase : List[str] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
UpperCAmelCase : Optional[Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Tuple[int, int] , UpperCamelCase : bool = False ):
UpperCAmelCase : Any = B""""""
UpperCAmelCase , UpperCAmelCase : List[Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
UpperCAmelCase : int = 0
for raw in iterator:
acc += raw
if stream and len(UpperCamelCase ) < chunk_len:
UpperCAmelCase : Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(UpperCamelCase ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase : List[str] = (_stride_left, stride_right)
UpperCAmelCase : List[str] = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
UpperCAmelCase : List[str] = False
yield item
UpperCAmelCase : Optional[Any] = stride_left
UpperCAmelCase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(UpperCamelCase ) > stride_left:
UpperCAmelCase : Union[str, Any] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
UpperCAmelCase : str = False
yield item
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int ):
UpperCAmelCase : List[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(UpperCamelCase , stdout=subprocess.PIPE , bufsize=UpperCamelCase ) as ffmpeg_process:
while True:
UpperCAmelCase : Optional[Any] = ffmpeg_process.stdout.read(UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 359 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = "vit_mae"
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.0_2 , A_=1e-12 , A_=224 , A_=16 , A_=3 , A_=True , A_=16 , A_=512 , A_=8 , A_=2048 , A_=0.7_5 , A_=False , **A_ , ) -> Optional[int]:
super().__init__(**_lowerCAmelCase )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = decoder_num_attention_heads
lowerCAmelCase = decoder_hidden_size
lowerCAmelCase = decoder_num_hidden_layers
lowerCAmelCase = decoder_intermediate_size
lowerCAmelCase = mask_ratio
lowerCAmelCase = norm_pix_loss | 433 |
'''simple docstring'''
from collections.abc import Sequence
def _A ( _lowerCAmelCase = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
__lowercase =nums[0]
for i in range(1 , len(_lowerCAmelCase ) ):
__lowercase =nums[i]
__lowercase =max(_lowerCAmelCase , ans + num , _lowerCAmelCase )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase = int(input("""Enter number of elements : """).strip())
lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 474 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case__ = logging.get_logger(__name__)
snake_case__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case__ = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
snake_case__ = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
snake_case__ = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = VOCAB_FILES_NAMES
lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ = SqueezeBertTokenizer
def __init__( self : Optional[Any] , __A : List[str]=None , __A : Optional[Any]=None , __A : Optional[int]=True , __A : str="[UNK]" , __A : Optional[int]="[SEP]" , __A : int="[PAD]" , __A : Union[str, Any]="[CLS]" , __A : Union[str, Any]="[MASK]" , __A : Tuple=True , __A : Tuple=None , **__A : Union[str, Any] , ) ->List[Any]:
"""simple docstring"""
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
a__ :List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __A ) != do_lower_case
or normalizer_state.get("strip_accents" , __A ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __A ) != tokenize_chinese_chars
):
a__ :Dict = getattr(__A , normalizer_state.pop("type" ) )
a__ :List[str] = do_lower_case
a__ :Optional[Any] = strip_accents
a__ :Optional[int] = tokenize_chinese_chars
a__ :Optional[int] = normalizer_class(**__A )
a__ :List[Any] = do_lower_case
def _snake_case ( self : Optional[int] , __A : Tuple , __A : Union[str, Any]=None ) ->List[Any]:
"""simple docstring"""
a__ :Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self : Tuple , __A : List[int] , __A : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a__ :int = [self.sep_token_id]
a__ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self : Union[str, Any] , __A : str , __A : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
a__ :Any = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 706 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 373 | 0 |
"""simple docstring"""
from __future__ import annotations
__magic_name__ = [True] * 1_00_00_01
__magic_name__ = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
__magic_name__ = False
i += 1
def _A ( __lowercase ):
"""simple docstring"""
return seive[n]
def _A ( __lowercase ):
"""simple docstring"""
return any(digit in """02468""" for digit in str(snake_case_ ) )
def _A ( __lowercase = 100_0000 ):
"""simple docstring"""
lowerCamelCase__ = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(snake_case_ ) and not contains_an_even_digit(snake_case_ ):
lowerCamelCase__ = str(snake_case_ )
lowerCamelCase__ = [int(str_num[j:] + str_num[:j] ) for j in range(len(snake_case_ ) )]
if all(is_prime(snake_case_ ) for i in list_nums ):
result.append(snake_case_ )
return result
def _A ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 129 | '''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any]=0.999 , snake_case_ : Tuple="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case_ : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case_ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCAmelCase_ = []
for i in range(snake_case_ ):
UpperCAmelCase_ = i / num_diffusion_timesteps
UpperCAmelCase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Tuple = [e.name for e in KarrasDiffusionSchedulers]
a__ : Optional[Any] = 2
@register_to_config
def __init__(self : Union[str, Any] , __a : int = 1000 , __a : float = 0.0_00_85 , __a : float = 0.0_12 , __a : str = "linear" , __a : Optional[Union[np.ndarray, List[float]]] = None , __a : str = "epsilon" , __a : Optional[bool] = False , __a : Optional[bool] = False , __a : float = 1.0 , __a : str = "linspace" , __a : int = 0 , ):
if trained_betas is not None:
UpperCAmelCase_ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase_ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase_ = betas_for_alpha_bar(__a , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase_ = 1.0 - self.betas
UpperCAmelCase_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__a , __a , __a )
UpperCAmelCase_ = use_karras_sigmas
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : Tuple=None ):
if schedule_timesteps is None:
UpperCAmelCase_ = self.timesteps
UpperCAmelCase_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ = 1 if len(__a ) > 1 else 0
else:
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
UpperCAmelCase_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self : List[Any] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self : Optional[Any] , __a : torch.FloatTensor , __a : Union[float, torch.FloatTensor] , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self : Any , __a : int , __a : Union[str, torch.device] = None , __a : Optional[int] = None , ):
UpperCAmelCase_ = num_inference_steps
UpperCAmelCase_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
UpperCAmelCase_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ = np.log(__a )
UpperCAmelCase_ = np.interp(__a , np.arange(0 , len(__a ) ) , __a )
if self.config.use_karras_sigmas:
UpperCAmelCase_ = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps )
UpperCAmelCase_ = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] )
UpperCAmelCase_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase_ = torch.from_numpy(__a ).to(device=__a )
UpperCAmelCase_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ = torch.from_numpy(__a )
UpperCAmelCase_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__a ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase_ = timesteps.to(__a , dtype=torch.floataa )
else:
UpperCAmelCase_ = timesteps.to(device=__a )
# empty dt and derivative
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ = defaultdict(__a )
def _lowercase (self : int , __a : Optional[Any] , __a : List[str] ):
# get log sigma
UpperCAmelCase_ = np.log(__a )
# get distribution
UpperCAmelCase_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase_ = low_idx + 1
UpperCAmelCase_ = log_sigmas[low_idx]
UpperCAmelCase_ = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ = (low - log_sigma) / (low - high)
UpperCAmelCase_ = np.clip(__a , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ = t.reshape(sigma.shape )
return t
def _lowercase (self : Dict , __a : torch.FloatTensor , __a : Optional[int] ):
UpperCAmelCase_ = in_sigmas[-1].item()
UpperCAmelCase_ = in_sigmas[0].item()
UpperCAmelCase_ = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase_ = np.linspace(0 , 1 , __a )
UpperCAmelCase_ = sigma_min ** (1 / rho)
UpperCAmelCase_ = sigma_max ** (1 / rho)
UpperCAmelCase_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self : List[str] ):
return self.dt is None
def _lowercase (self : List[Any] , __a : Union[torch.FloatTensor, np.ndarray] , __a : Union[float, torch.FloatTensor] , __a : Union[torch.FloatTensor, np.ndarray] , __a : bool = True , ):
UpperCAmelCase_ = self.index_for_timestep(__a )
# advance index counter by 1
UpperCAmelCase_ = timestep.cpu().item() if torch.is_tensor(__a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ = self.sigmas[step_index]
UpperCAmelCase_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase_ = self.sigmas[step_index - 1]
UpperCAmelCase_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ = 0
UpperCAmelCase_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
UpperCAmelCase_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase_ = derivative
UpperCAmelCase_ = dt
UpperCAmelCase_ = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase_ = (sample - pred_original_sample) / sigma_next
UpperCAmelCase_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase_ = self.dt
UpperCAmelCase_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def _lowercase (self : Any , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__a ):
# mps does not support float64
UpperCAmelCase_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase_ = self.timesteps.to(original_samples.device )
UpperCAmelCase_ = timesteps.to(original_samples.device )
UpperCAmelCase_ = [self.index_for_timestep(__a , __a ) for t in timesteps]
UpperCAmelCase_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ = sigma.unsqueeze(-1 )
UpperCAmelCase_ = original_samples + noise * sigma
return noisy_samples
def __len__(self : str ):
return self.config.num_train_timesteps
| 78 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowercase = '''pt'''
elif is_tf_available():
_lowercase = '''tf'''
else:
_lowercase = '''jax'''
class __A ( A_ , unittest.TestCase ):
UpperCamelCase :Optional[Any] = ByTaTokenizer
UpperCamelCase :Union[str, Any] = False
def _snake_case (self ):
super().setUp()
lowerCamelCase__ : Optional[Any] = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case (self ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def _snake_case (self , **__magic_name__ ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def _snake_case (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCamelCase__ : Tuple = []
for i in range(len(__magic_name__ ) ):
try:
lowerCamelCase__ : str = tokenizer.decode([i] , clean_up_tokenization_spaces=__magic_name__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Tuple = list(filter(lambda __magic_name__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , __magic_name__ ) )
lowerCamelCase__ : Any = list(filter(lambda __magic_name__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__magic_name__ ) , __magic_name__ ) )
if max_length is not None and len(__magic_name__ ) > max_length:
lowerCamelCase__ : List[Any] = toks[:max_length]
if min_length is not None and len(__magic_name__ ) < min_length and len(__magic_name__ ) > 0:
while len(__magic_name__ ) < min_length:
lowerCamelCase__ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Optional[Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : List[str] = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
if " " not in output_txt and len(__magic_name__ ) > 1:
lowerCamelCase__ : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__magic_name__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__magic_name__ )
)
if with_prefix_space:
lowerCamelCase__ : Optional[Any] = """ """ + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
return output_txt, output_ids
def _snake_case (self ):
lowerCamelCase__ : List[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : List[str] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def _snake_case (self ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Optional[int] = """Unicode €."""
lowerCamelCase__ : Dict = tokenizer(__magic_name__ )
lowerCamelCase__ : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , __magic_name__ )
# decoding
lowerCamelCase__ : Optional[int] = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , """Unicode €.</s>""" )
lowerCamelCase__ : Tuple = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : Dict = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , __magic_name__ )
# decoding
lowerCamelCase__ : Optional[int] = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCamelCase__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : Any = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Tuple = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _snake_case (self ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : List[str] = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __magic_name__ )
self.assertIn("""attention_mask""" , __magic_name__ )
self.assertNotIn("""decoder_input_ids""" , __magic_name__ )
self.assertNotIn("""decoder_attention_mask""" , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : List[str] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="""max_length""" , truncation=__magic_name__ , return_tensors=__magic_name__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _snake_case (self ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : Dict = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Union[str, Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Optional[int] = tokenizer(__magic_name__ , text_target=__magic_name__ )
self.assertEqual(__magic_name__ , batch["""input_ids"""][0] )
self.assertEqual(__magic_name__ , batch["""labels"""][0] )
def _snake_case (self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
lowerCamelCase__ : Tuple = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : Dict = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(__magic_name__ )
lowerCamelCase__ : Tuple = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
lowerCamelCase__ : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : str = tempfile.mkdtemp()
lowerCamelCase__ : Tuple = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
lowerCamelCase__ : str = tokenizer.__class__.from_pretrained(__magic_name__ )
lowerCamelCase__ : str = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : List[str] = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(__magic_name__ )
lowerCamelCase__ : Tuple = [f"<extra_id_{i}>" for i in range(125 )]
lowerCamelCase__ : Dict = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Any = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__magic_name__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Optional[Any] = tokenizer_class.from_pretrained(
__magic_name__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__magic_name__ )]
lowerCamelCase__ : Optional[Any] = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
lowerCamelCase__ : Optional[Any] = tokenizer_class.from_pretrained(__magic_name__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def _snake_case (self ):
pass
def _snake_case (self ):
pass
def _snake_case (self ):
pass
def _snake_case (self ):
pass
def _snake_case (self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCamelCase__ : Dict = self.get_tokenizers(fast=__magic_name__ , do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Tuple = tokenizer.convert_tokens_to_string(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase__ : Optional[Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : int = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
__magic_name__ , skip_special_tokens=__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , attr + """_id""" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + """_id""" ) , __magic_name__ )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [] )
setattr(__magic_name__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__magic_name__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 96 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_lowercase = logging.get_logger(__name__)
def _A (UpperCamelCase : Dict , UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
lowerCamelCase__ : List[str] = nn.functional.normalize(UpperCamelCase )
lowerCamelCase__ : Tuple = nn.functional.normalize(UpperCamelCase )
return torch.mm(UpperCamelCase , normalized_text_embeds.t() )
class __A ( A_ ):
UpperCamelCase :Optional[Any] = CLIPConfig
UpperCamelCase :Optional[Any] = ['''CLIPEncoderLayer''']
def __init__(self , __magic_name__ ):
super().__init__(__magic_name__ )
lowerCamelCase__ : Union[str, Any] = CLIPVisionModel(config.vision_config )
lowerCamelCase__ : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Any = nn.Parameter(torch.ones(17 ) , requires_grad=__magic_name__ )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__magic_name__ )
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Any = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : Tuple = self.visual_projection(__magic_name__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase__ : Any = cosine_distance(__magic_name__ , self.special_care_embeds ).cpu().float().numpy()
lowerCamelCase__ : str = cosine_distance(__magic_name__ , self.concept_embeds ).cpu().float().numpy()
lowerCamelCase__ : int = []
lowerCamelCase__ : str = image_embeds.shape[0]
for i in range(__magic_name__ ):
lowerCamelCase__ : str = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCamelCase__ : List[str] = special_cos_dist[i][concept_idx]
lowerCamelCase__ : Tuple = self.special_care_embeds_weights[concept_idx].item()
lowerCamelCase__ : str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
lowerCamelCase__ : List[str] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCamelCase__ : str = cos_dist[i][concept_idx]
lowerCamelCase__ : Dict = self.concept_embeds_weights[concept_idx].item()
lowerCamelCase__ : int = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__magic_name__ )
result.append(__magic_name__ )
lowerCamelCase__ : Tuple = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _snake_case (self , __magic_name__ , __magic_name__ ):
lowerCamelCase__ : Union[str, Any] = self.vision_model(__magic_name__ )[1] # pooled_output
lowerCamelCase__ : str = self.visual_projection(__magic_name__ )
lowerCamelCase__ : Optional[Any] = cosine_distance(__magic_name__ , self.special_care_embeds )
lowerCamelCase__ : Dict = cosine_distance(__magic_name__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCamelCase__ : Any = 0.0
lowerCamelCase__ : Dict = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCamelCase__ : Union[str, Any] = torch.any(special_scores > 0 , dim=1 )
lowerCamelCase__ : Optional[Any] = special_care * 0.01
lowerCamelCase__ : Dict = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCamelCase__ : Tuple = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCamelCase__ : Any = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 96 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCamelCase__ : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
__a : str = field(
default=lowerCamelCase_ , metadata={"help": "Model type selected in the list: " + ", ".join(lowerCamelCase_ )} )
__a : str = field(
default=lowerCamelCase_ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
__a : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : int = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
__a : int = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
__a : int = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
__a : float = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
__a : int = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
__a : int = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
__a : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Tuple = "train"
__a : Dict = "dev"
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : SquadDataTrainingArguments
__a : List[SquadFeatures]
__a : Split
__a : bool
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__ = Split.train ,snake_case__ = False ,snake_case__ = None ,snake_case__ = "pt" ,):
SCREAMING_SNAKE_CASE_ : Tuple = args
SCREAMING_SNAKE_CASE_ : Dict = is_language_sensitive
SCREAMING_SNAKE_CASE_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(snake_case__ ,snake_case__ ):
try:
SCREAMING_SNAKE_CASE_ : Any = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
SCREAMING_SNAKE_CASE_ : Any = mode
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'v2' if args.version_2_with_negative else 'v1'
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Any = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE_ : Tuple = time.time()
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(snake_case__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
SCREAMING_SNAKE_CASE_ : List[Any] = self.old_features['features']
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.old_features.get('dataset' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = self.old_features.get('examples' ,snake_case__ )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' ,time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
' future run' )
else:
if mode == Split.dev:
SCREAMING_SNAKE_CASE_ : str = self.processor.get_dev_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE_ : List[str] = self.processor.get_train_examples(args.data_dir )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = squad_convert_examples_to_features(
examples=self.examples ,tokenizer=snake_case__ ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} ,snake_case__ ,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
# Convert to Tensors and build dataset
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.features[i]
SCREAMING_SNAKE_CASE_ : str = torch.tensor(feature.input_ids ,dtype=torch.long )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(feature.attention_mask ,dtype=torch.long )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(feature.token_type_ids ,dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor(feature.cls_index ,dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(feature.p_mask ,dtype=torch.float )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(feature.is_impossible ,dtype=torch.float )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(feature.start_position ,dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(feature.end_position ,dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 105 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[List[float]] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(lowerCamelCase_ ):
if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ )
else:
return _interleave_iterable_datasets(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , stopping_strategy=lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[DatasetType] , lowerCamelCase_ : Optional[DatasetInfo] = None , lowerCamelCase_ : Optional[NamedSplit] = None , lowerCamelCase_ : int = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(lowerCamelCase_ ):
if not isinstance(lowerCamelCase_ , (Dataset, IterableDataset) ):
if isinstance(lowerCamelCase_ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(lowerCamelCase_ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(lowerCamelCase_ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowerCamelCase_ ).__name__}.' )
if i == 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = (
(Dataset, IterableDataset) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else (IterableDataset, Dataset)
)
elif not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ )
else:
return _concatenate_iterable_datasets(lowerCamelCase_ , info=lowerCamelCase_ , split=lowerCamelCase_ , axis=lowerCamelCase_ )
| 105 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=13 ,_lowerCAmelCase=7 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=99 ,_lowerCAmelCase=64 ,_lowerCAmelCase=32 ,_lowerCAmelCase=5 ,_lowerCAmelCase=4 ,_lowerCAmelCase=37 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=16 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3 ,_lowerCAmelCase=4 ,_lowerCAmelCase=None ,):
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = embedding_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def UpperCamelCase_ ( self ):
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] ,self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_lowerCAmelCase ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase ,token_type_ids=_lowerCAmelCase )
lowerCamelCase__ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForNextSentencePrediction(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,next_sentence_label=_lowerCAmelCase ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = MegatronBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,start_positions=_lowerCAmelCase ,end_positions=_lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = MegatronBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = model(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = self.num_choices
lowerCamelCase__ = MegatronBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCamelCase__ = model(
_lowerCAmelCase ,attention_mask=_lowerCAmelCase ,token_type_ids=_lowerCAmelCase ,labels=_lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = config_and_inputs
lowerCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (a ,a ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
# test_resize_embeddings = False
_UpperCamelCase = False
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=False ):
lowerCamelCase__ = super()._prepare_for_class(_lowerCAmelCase ,_lowerCAmelCase ,return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
lowerCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=_lowerCAmelCase )
lowerCamelCase__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_lowerCAmelCase )
return inputs_dict
def UpperCamelCase_ ( self ):
lowerCamelCase__ = MegatronBertModelTester(self )
lowerCamelCase__ = ConfigTester(self ,config_class=_lowerCAmelCase ,hidden_size=37 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowerCAmelCase )
def A__ ( __lowerCAmelCase : Optional[int] ):
return torch.tensor(
__lowerCAmelCase , dtype=torch.long , device=__lowerCAmelCase , )
UpperCamelCase : Dict = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
lowerCamelCase__ = os.path.join(os.environ["""MYDIR"""] ,_lowerCAmelCase )
lowerCamelCase__ = MegatronBertModel.from_pretrained(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.half()
lowerCamelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
lowerCamelCase__ = model(_lowerCAmelCase )[0]
lowerCamelCase__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape ,_lowerCAmelCase )
lowerCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowerCamelCase__ = output[0, ii, jj]
lowerCamelCase__ = expected[3 * ii + jj]
lowerCamelCase__ = """ii={} jj={} a={} b={}""".format(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
self.assertTrue(math.isclose(_lowerCAmelCase ,_lowerCAmelCase ,rel_tol=_lowerCAmelCase ,abs_tol=_lowerCAmelCase ) ,msg=_lowerCAmelCase )
| 9 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCamelCase : Dict = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
UpperCamelCase : List[Any] = {
'camembert-base': 5_12,
}
UpperCamelCase : List[str] = '▁'
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="</s>" ,_lowerCAmelCase="<s>" ,_lowerCAmelCase="<unk>" ,_lowerCAmelCase="<pad>" ,_lowerCAmelCase="<mask>" ,_lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
lowerCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_lowerCAmelCase ,)
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
lowerCamelCase__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
lowerCamelCase__ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCamelCase__ = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCamelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
lowerCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase ,token_ids_a=_lowerCAmelCase ,already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase_ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase ,out_type=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(_lowerCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
lowerCamelCase__ = []
lowerCamelCase__ = """"""
lowerCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowerCamelCase__ = True
lowerCamelCase__ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowerCamelCase__ = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def __getstate__( self ):
lowerCamelCase__ = self.__dict__.copy()
lowerCamelCase__ = None
return state
def __setstate__( self ,_lowerCAmelCase ):
lowerCamelCase__ = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
lowerCamelCase__ = {}
lowerCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ = os.path.join(
_lowerCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase ,"""wb""" ) as fi:
lowerCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 9 | 1 |
def UpperCamelCase (lowercase_: Optional[int] = 1000 ) -> int:
A__ : Tuple = 3
A__ : List[str] = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 456 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 57 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def lowerCamelCase_ ( ) -> Any:
__A : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__A : Dict = bs[:]
__A : Tuple = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
__A : List[str] = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase , _lowercase ) )
def lowerCamelCase_ ( _lowercase ) -> List[str]:
__A : Any = set()
__A : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : Dict = char
return pairs
class _a ( a__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
__A : Optional[int] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
__A : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
__A : Union[str, Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
__A : Optional[int] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
__A : Tuple = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
__A : Tuple = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__A : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding="utf-8" ) as vocab_handle:
__A : int = json.load(lowerCamelCase_ )
__A : List[Any] = {v: k for k, v in self.encoder.items()}
__A : Optional[Any] = errors # how to handle errors in decoding
__A : Optional[Any] = bytes_to_unicode()
__A : Any = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding="utf-8" ) as merges_handle:
__A : Tuple = merges_handle.read().split("\n" )[1:-1]
__A : Any = [tuple(merge.split() ) for merge in bpe_merges]
__A : Optional[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
__A : List[str] = {}
__A : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A : int = re.compile(r"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCAmelCase( self ):
return len(self.encoder )
def __UpperCAmelCase( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCAmelCase( self , __UpperCAmelCase ):
if token in self.cache:
return self.cache[token]
__A : Dict = tuple(lowerCamelCase_ )
__A : List[str] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
__A : str = min(lowerCamelCase_ , key=lambda __UpperCAmelCase : self.bpe_ranks.get(lowerCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A : List[Any] = bigram
__A : Optional[int] = []
__A : Optional[int] = 0
while i < len(lowerCamelCase_ ):
try:
__A : List[Any] = word.index(lowerCamelCase_ , lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A : int = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A : Optional[Any] = tuple(lowerCamelCase_ )
__A : Optional[int] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
__A : Tuple = get_pairs(lowerCamelCase_ )
__A : List[str] = " ".join(lowerCamelCase_ )
__A : List[str] = word
return word
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : List[str] = []
for token in re.findall(self.pat , lowerCamelCase_ ):
__A : Union[str, Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(" " ) )
return bpe_tokens
def __UpperCAmelCase( self , __UpperCAmelCase ):
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
def __UpperCAmelCase( self , __UpperCAmelCase ):
return self.decoder.get(lowerCamelCase_ )
def __UpperCAmelCase( self , __UpperCAmelCase ):
__A : Optional[Any] = "".join(lowerCamelCase_ )
__A : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__A : List[str] = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__A : Optional[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + "\n" )
__A : List[str] = 0
with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
__A : List[str] = token_index
writer.write(" ".join(lowerCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None ):
__A : Optional[int] = [self.sep_token_id]
__A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase=False , **__UpperCAmelCase ):
__A : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
__A : Union[str, Any] = " " + text
return (text, kwargs)
| 710 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """vit_msn"""
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3_072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-06 , __UpperCAmelCase=224 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
__A : Any = hidden_size
__A : Optional[int] = num_hidden_layers
__A : str = num_attention_heads
__A : Any = intermediate_size
__A : Dict = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : Dict = initializer_range
__A : List[str] = layer_norm_eps
__A : int = image_size
__A : Any = patch_size
__A : Optional[int] = num_channels
__A : int = qkv_bias
| 387 | 0 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case_ ( __snake_case : Tuple=None , __snake_case : int=None) -> int:
return field(default_factory=lambda: default , metadata=__snake_case)
@dataclass
class __UpperCAmelCase :
__A : str = field(
metadata={'help': 'The csv file to plot.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__A : bool = field(
default=__a , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__A : bool = field(
default=__a , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__A : Optional[str] = field(
default=__a , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__A : Optional[List[str]] = list_field(
default=__a , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def snake_case_ ( __snake_case : Optional[Any]) -> Dict:
try:
int(__snake_case)
return True
except ValueError:
return False
def snake_case_ ( __snake_case : Dict) -> int:
try:
float(__snake_case)
return True
except ValueError:
return False
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase ):
lowerCAmelCase_ = args
lowerCAmelCase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
lowerCAmelCase_ = csv.DictReader(_lowerCamelCase )
for row in reader:
lowerCAmelCase_ = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
lowerCAmelCase_ = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
lowerCAmelCase_ = float(row['''result'''] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ ,lowerCAmelCase_ = plt.subplots()
lowerCAmelCase_ = '''Time usage''' if self.args.is_time else '''Memory usage'''
lowerCAmelCase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
lowerCAmelCase_ = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
lowerCAmelCase_ = self.result_dict[model_name]['''result''']
((lowerCAmelCase_) ,(lowerCAmelCase_)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCAmelCase_ = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCAmelCase_ = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCamelCase , )
else:
lowerCAmelCase_ = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((lowerCAmelCase_) ,(lowerCAmelCase_)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
lowerCAmelCase_ = np.asarray(_lowerCamelCase , _lowerCamelCase )[: len(_lowerCamelCase )]
plt.scatter(
_lowerCamelCase , _lowerCamelCase , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(_lowerCamelCase , _lowerCamelCase , '''--''' )
title_str += F''' {label_model_name} vs.'''
lowerCAmelCase_ = title_str[:-4]
lowerCAmelCase_ = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_lowerCamelCase )
plt.xlabel(_lowerCamelCase )
plt.ylabel(_lowerCamelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def snake_case_ ( ) -> Tuple:
lowerCAmelCase_ = HfArgumentParser(__snake_case)
lowerCAmelCase_ = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ = Plot(args=__snake_case)
plot.plot()
if __name__ == "__main__":
main()
| 274 | '''simple docstring'''
import math
import sys
def snake_case_ ( __snake_case : str) -> str:
lowerCAmelCase_ = ''''''
try:
with open(__snake_case , '''rb''') as binary_file:
lowerCAmelCase_ = binary_file.read()
for dat in data:
lowerCAmelCase_ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''')
sys.exit()
def snake_case_ ( __snake_case : str) -> str:
lowerCAmelCase_ = {'''0''': '''0''', '''1''': '''1'''}
lowerCAmelCase_ ,lowerCAmelCase_ = '''''', ''''''
lowerCAmelCase_ = len(__snake_case)
for i in range(len(__snake_case)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowerCAmelCase_ = lexicon[curr_string]
result += last_match_id
lowerCAmelCase_ = last_match_id + '''0'''
if math.loga(__snake_case).is_integer():
lowerCAmelCase_ = {}
for curr_key in list(__snake_case):
lowerCAmelCase_ = lexicon.pop(__snake_case)
lowerCAmelCase_ = new_lex
lowerCAmelCase_ = last_match_id + '''1'''
index += 1
lowerCAmelCase_ = ''''''
return result
def snake_case_ ( __snake_case : str , __snake_case : str) -> None:
lowerCAmelCase_ = 8
try:
with open(__snake_case , '''wb''') as opened_file:
lowerCAmelCase_ = [
to_write[i : i + byte_length]
for i in range(0 , len(__snake_case) , __snake_case)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('''10000000''')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__snake_case , 2).to_bytes(1 , byteorder='''big'''))
except OSError:
print('''File not accessible''')
sys.exit()
def snake_case_ ( __snake_case : str) -> str:
lowerCAmelCase_ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowerCAmelCase_ = data_bits[counter:]
lowerCAmelCase_ = data_bits[counter + 1 :]
return data_bits
def snake_case_ ( __snake_case : str , __snake_case : str) -> None:
lowerCAmelCase_ = read_file_binary(__snake_case)
lowerCAmelCase_ = remove_prefix(__snake_case)
lowerCAmelCase_ = decompress_data(__snake_case)
write_file_binary(__snake_case , __snake_case)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 274 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
lowercase : Union[List[PIL.Image.Image], np.ndarray]
lowercase : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 509 |
"""simple docstring"""
from torch import nn
class A_ ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
super().__init__()
a : List[str] = class_size
a : Tuple = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
a : int = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ ( self , __UpperCAmelCase ) -> Any:
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
a : int = self.mlp(__UpperCAmelCase )
return logits
| 509 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _lowercase :
def __init__( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , lowerCamelCase__ : Union[str, Any]="resnet50" , lowerCamelCase__ : int=3 , lowerCamelCase__ : Any=3_2 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[Any]=True , ) -> List[Any]:
"""simple docstring"""
A_ = parent
A_ = out_indices if out_indices is not None else [4]
A_ = stage_names
A_ = out_features
A_ = backbone
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = use_pretrained_backbone
A_ = is_training
def UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = self.get_config()
return config, pixel_values
def UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def UpperCamelCase ( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
A_ = TimmBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
A_ = self.prepare_config_and_inputs()
A_ ,A_ = config_and_inputs
A_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _lowercase ( __lowerCamelCase,__lowerCamelCase,__lowerCamelCase,unittest.TestCase ):
_lowercase : Optional[int] = (TimmBackbone,) if is_torch_available() else ()
_lowercase : Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
_lowercase : Tuple = False
_lowercase : Optional[int] = False
_lowercase : Any = False
_lowercase : Union[str, Any] = False
def UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
A_ = TimmBackboneModelTester(self )
A_ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def UpperCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
A_ = '''resnet18'''
A_ = '''microsoft/resnet-18'''
A_ = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ )
A_ = AutoBackbone.from_pretrained(lowerCamelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
A_ = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ , out_indices=[1, 2, 3] )
A_ = AutoBackbone.from_pretrained(lowerCamelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(lowerCamelCase__ )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
A_ = self.all_model_classes[0]
A_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
A_ = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
A_ = model(**lowerCamelCase__ )
A_ = outputs[0][-1]
# Encoder-/Decoder-only models
A_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
A_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
A_ = copy.deepcopy(lowerCamelCase__ )
A_ = None
A_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
A_ = copy.deepcopy(lowerCamelCase__ )
A_ = False
A_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
A_ = model(**lowerCamelCase__ )
| 203 |
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = 100 ):
'''simple docstring'''
A_ = 0
A_ = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 203 | 1 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE : int ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def _a ( _SCREAMING_SNAKE_CASE : Optional[int] ):
class lowerCAmelCase :
def __init__( self , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = metric_id
class lowerCAmelCase :
a : List[Any] = [MetricMock(__UpperCAmelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def lowercase ( self ):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def _a ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
if "tmp_path" in args:
_SCREAMING_SNAKE_CASE = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(_SCREAMING_SNAKE_CASE , match="https://huggingface.co/docs/evaluate" ):
func(*_SCREAMING_SNAKE_CASE ) | 493 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase ( __UpperCAmelCase ):
a : Dict = DistilBertTokenizer
a : Tuple = DistilBertTokenizerFast
a : List[str] = True
@slow
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
] | 493 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : str = '▁'
__lowerCAmelCase : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model'}
__lowerCAmelCase : str = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__lowerCAmelCase : Tuple = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
__lowerCAmelCase : List[str] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __lowerCAmelCase ( snake_case_ ):
"""simple docstring"""
A__ : List[str] = VOCAB_FILES_NAMES
A__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = PRETRAINED_VOCAB_FILES_MAP
A__ : Dict = ["input_ids", "attention_mask"]
A__ : List[int] = []
A__ : List[int] = []
def __init__( self : Union[str, Any] , _snake_case : Dict , _snake_case : List[str]="<s>" , _snake_case : List[Any]="</s>" , _snake_case : Any="</s>" , _snake_case : Tuple="<s>" , _snake_case : Dict="<unk>" , _snake_case : Tuple="<pad>" , _snake_case : Optional[Any]="<mask>" , _snake_case : List[Any]=None , _snake_case : Tuple=None , _snake_case : Union[str, Any]=None , _snake_case : List[Any] = None , _snake_case : str=None , **_snake_case : str , ):
__lowercase : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
__lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
__lowercase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__lowercase : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__lowercase : int = 1
__lowercase : Tuple = len(self.sp_model )
__lowercase : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase )
}
__lowercase : int = {v: k for k, v in self.lang_code_to_id.items()}
__lowercase : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__lowercase : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__lowercase : Optional[Any] = src_lang if src_lang is not None else '''en_XX'''
__lowercase : List[str] = self.lang_code_to_id[self._src_lang]
__lowercase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ):
__lowercase : Optional[Any] = self.__dict__.copy()
__lowercase : Dict = None
__lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , _snake_case : List[str] ):
__lowercase : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowercase : Tuple = {}
__lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def snake_case_ ( self : Optional[Any] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ ( self : str ):
return self._src_lang
@src_lang.setter
def snake_case_ ( self : List[str] , _snake_case : Optional[int] ):
__lowercase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def snake_case_ ( self : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] = None , _snake_case : Optional[int] = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
__lowercase : Optional[int] = [1] * len(self.prefix_tokens )
__lowercase : Tuple = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def snake_case_ ( self : Dict , _snake_case : Tuple , _snake_case : List[str] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self : str , _snake_case : int , _snake_case : Tuple = None ):
__lowercase : Union[str, Any] = [self.sep_token_id]
__lowercase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : List[str] , **_snake_case : str ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__lowercase : List[Any] = src_lang
__lowercase : Dict = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
__lowercase : int = self.convert_tokens_to_ids(__UpperCAmelCase )
__lowercase : Union[str, Any] = tgt_lang_id
return inputs
def snake_case_ ( self : int ):
__lowercase : Any = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self : List[str] , _snake_case : Optional[int] ):
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def snake_case_ ( self : str , _snake_case : Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowercase : List[Any] = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self : Optional[int] , _snake_case : List[str] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self : Optional[Any] , _snake_case : List[Any] ):
__lowercase : Optional[Any] = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def snake_case_ ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple = None ):
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase : Optional[Any] = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowercase : Dict = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def snake_case_ ( self : List[Any] , _snake_case : List[Any] , _snake_case : str = "en_XX" , _snake_case : List[Any] = None , _snake_case : List[str] = "ro_RO" , **_snake_case : List[Any] , ):
__lowercase : Union[str, Any] = src_lang
__lowercase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def snake_case_ ( self : Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def snake_case_ ( self : Union[str, Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def snake_case_ ( self : Optional[Any] , _snake_case : Any ):
__lowercase : List[Any] = self.lang_code_to_id[src_lang]
__lowercase : int = []
__lowercase : Dict = [self.eos_token_id, self.cur_lang_code]
def snake_case_ ( self : int , _snake_case : Any ):
__lowercase : Any = self.lang_code_to_id[lang]
__lowercase : Tuple = []
__lowercase : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
| 509 |
'''simple docstring'''
from manim import *
class lowerCamelCase__( snake_case_ ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.25 , width=0.25 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""CPU""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""GPU""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Model""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
__lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Loaded Checkpoint""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__UpperCAmelCase )
__lowercase = []
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
__lowercase = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
ckpt_arr.append(__UpperCAmelCase )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
__lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
__lowercase = Text("""Disk""" , font_size=2_4 )
__lowercase = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
__lowercase = []
for i, rect in enumerate(__UpperCAmelCase ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(FadeOut(__UpperCAmelCase ) )
__lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , )
self.wait()
| 566 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class _UpperCAmelCase ( UpperCAmelCase_ ):
def __init__( self : Any , UpperCAmelCase : Union[str, Any]=-1):
# in NER datasets, the last column is usually reserved for NER label
SCREAMING_SNAKE_CASE_ :Optional[int] = label_idx
def _snake_case ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[int]):
if isinstance(_lowercase , _lowercase):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = mode.value
SCREAMING_SNAKE_CASE_ :Tuple = os.path.join(_lowercase , F"{mode}.txt")
SCREAMING_SNAKE_CASE_ :Any = 1
SCREAMING_SNAKE_CASE_ :str = []
with open(_lowercase , encoding="utf-8") as f:
SCREAMING_SNAKE_CASE_ :List[str] = []
SCREAMING_SNAKE_CASE_ :int = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=_lowercase , labels=_lowercase))
guid_index += 1
SCREAMING_SNAKE_CASE_ :Optional[Any] = []
SCREAMING_SNAKE_CASE_ :int = []
else:
SCREAMING_SNAKE_CASE_ :Dict = line.split(" ")
words.append(splits[0])
if len(_lowercase) > 1:
labels.append(splits[self.label_idx].replace("\n" , ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=_lowercase , labels=_lowercase))
return examples
def _snake_case ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : str):
SCREAMING_SNAKE_CASE_ :Tuple = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(_lowercase)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
SCREAMING_SNAKE_CASE_ :str = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
writer.write(_lowercase)
else:
logger.warning("Maximum sequence length exceeded: No prediction for \'%s\'." , line.split()[0])
def _snake_case ( self : Optional[Any] , UpperCAmelCase : int):
if path:
with open(_lowercase , "r") as f:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :Tuple = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _UpperCAmelCase ( UpperCAmelCase_ ):
def __init__( self : Optional[Any]):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2)
def _snake_case ( self : str , UpperCAmelCase : Any):
if path:
with open(_lowercase , "r") as f:
SCREAMING_SNAKE_CASE_ :Tuple = f.read().splitlines()
if "O" not in labels:
SCREAMING_SNAKE_CASE_ :str = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _UpperCAmelCase ( UpperCAmelCase_ ):
def _snake_case ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int):
if isinstance(_lowercase , _lowercase):
SCREAMING_SNAKE_CASE_ :Optional[int] = mode.value
SCREAMING_SNAKE_CASE_ :Any = os.path.join(_lowercase , F"{mode}.txt")
SCREAMING_SNAKE_CASE_ :Dict = 1
SCREAMING_SNAKE_CASE_ :Dict = []
with open(_lowercase , encoding="utf-8") as f:
for sentence in parse_incr(_lowercase):
SCREAMING_SNAKE_CASE_ :List[Any] = []
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(_lowercase) == len(_lowercase)
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=_lowercase , labels=_lowercase))
guid_index += 1
return examples
def _snake_case ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any]):
SCREAMING_SNAKE_CASE_ :Optional[int] = 0
for sentence in parse_incr(_lowercase):
SCREAMING_SNAKE_CASE_ :Optional[int] = preds_list[example_id]
SCREAMING_SNAKE_CASE_ :Tuple = ''
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(_lowercase)
example_id += 1
def _snake_case ( self : Optional[Any] , UpperCAmelCase : List[str]):
if path:
with open(_lowercase , "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 707 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : Tuple=None):
SCREAMING_SNAKE_CASE_ :Dict = np.random.default_rng(UpperCAmelCase)
SCREAMING_SNAKE_CASE_ :Optional[Any] = length
SCREAMING_SNAKE_CASE_ :int = rng.normal(size=(length,)).astype(np.floataa)
SCREAMING_SNAKE_CASE_ :Dict = a * self.x + b + rng.normal(scale=0.1 , size=(length,)).astype(np.floataa)
def __len__( self : Optional[Any]):
return self.length
def __getitem__( self : Tuple , UpperCAmelCase : Optional[int]):
return {"x": self.x[i], "y": self.y[i]}
class _UpperCAmelCase ( torch.nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : List[str]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Optional[int]=False):
super().__init__()
SCREAMING_SNAKE_CASE_ :str = torch.nn.Parameter(torch.tensor([2, 3]).float())
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3]).float())
SCREAMING_SNAKE_CASE_ :List[Any] = True
def _snake_case ( self : int , UpperCAmelCase : Tuple=None):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
SCREAMING_SNAKE_CASE_ :Optional[Any] = False
return x * self.a[0] + self.b[0]
class _UpperCAmelCase ( torch.nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase : int=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Optional[Any]=False):
super().__init__()
SCREAMING_SNAKE_CASE_ :List[str] = torch.nn.Parameter(torch.tensor(UpperCAmelCase).float())
SCREAMING_SNAKE_CASE_ :str = torch.nn.Parameter(torch.tensor(UpperCAmelCase).float())
SCREAMING_SNAKE_CASE_ :Any = True
def _snake_case ( self : Optional[int] , UpperCAmelCase : List[str]=None):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
SCREAMING_SNAKE_CASE_ :Dict = False
return x * self.a + self.b
def lowercase ( a , a = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ :Any = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_ :List[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
SCREAMING_SNAKE_CASE_ :List[Any] = load_dataset("csv" , data_files=a )
SCREAMING_SNAKE_CASE_ :List[Any] = datasets["train"].unique("label" )
SCREAMING_SNAKE_CASE_ :List[str] = {v: i for i, v in enumerate(a )}
def tokenize_function(a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ :List[str] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=a , max_length=a , padding="max_length" )
if "label" in examples:
SCREAMING_SNAKE_CASE_ :Tuple = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ :Optional[Any] = datasets.map(
a , batched=a , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(a , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ :List[Any] = DataLoader(tokenized_datasets["train"] , shuffle=a , collate_fn=a , batch_size=2 )
SCREAMING_SNAKE_CASE_ :Dict = DataLoader(tokenized_datasets["validation"] , shuffle=a , collate_fn=a , batch_size=1 )
return train_dataloader, eval_dataloader
| 140 | 0 |
def UpperCamelCase ( __lowerCamelCase : int = 1000 ):
return sum(e for e in range(3 , __lowerCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
| 204 |
import random
class UpperCAmelCase :
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : str ) -> tuple[list[int], list[int]]:
'''simple docstring'''
snake_case : int = [ord(snake_case__ ) for i in text]
snake_case : Optional[int] = []
snake_case : int = []
for i in plain:
snake_case : List[Any] = random.randint(1 , 3_00 )
snake_case : List[Any] = (i + k) * k
cipher.append(snake_case__ )
key.append(snake_case__ )
return cipher, key
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : list[int] , snake_case__ : list[int] ) -> str:
'''simple docstring'''
snake_case : int = []
for i in range(len(snake_case__ ) ):
snake_case : List[str] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(snake_case__ ) )
return "".join(snake_case__ )
if __name__ == "__main__":
__lowerCamelCase, __lowerCamelCase = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 204 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
A : Any = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "blenderbot-small"
A__ = ["past_key_values"]
A__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , __lowerCamelCase : List[str]=50265 , __lowerCamelCase : str=512 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : str=2048 , __lowerCamelCase : str=16 , __lowerCamelCase : List[Any]=8 , __lowerCamelCase : Any=2048 , __lowerCamelCase : List[str]=16 , __lowerCamelCase : Dict=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Union[str, Any]=0.0 , __lowerCamelCase : Any=0.0_2 , __lowerCamelCase : str=1 , __lowerCamelCase : Dict=False , __lowerCamelCase : int=0 , __lowerCamelCase : Optional[Any]=1 , __lowerCamelCase : str=2 , __lowerCamelCase : Any=2 , **__lowerCamelCase : int , ):
'''simple docstring'''
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[int] = encoder_ffn_dim
lowerCamelCase__ : Dict = encoder_layers
lowerCamelCase__ : Any = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase__ : str = decoder_layers
lowerCamelCase__ : Optional[Any] = decoder_attention_heads
lowerCamelCase__ : List[str] = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : Dict = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Dict = init_std
lowerCamelCase__ : List[str] = encoder_layerdrop
lowerCamelCase__ : Dict = decoder_layerdrop
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : List[Any] = encoder_layers
lowerCamelCase__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , forced_eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ : Union[str, Any] = {0: "batch"}
lowerCamelCase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase__ : Tuple = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase__ : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
lowerCamelCase__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] = super().outputs
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
lowerCamelCase__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
lowerCamelCase__ : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase ( self : int , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : List[str] = seq_length if not self.use_past else 1
lowerCamelCase__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : Tuple = common_inputs["input_ids"].shape
lowerCamelCase__ : int = common_inputs["decoder_input_ids"].shape[1]
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.num_attention_heads
lowerCamelCase__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Optional[int] = decoder_seq_length + 3
lowerCamelCase__ : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : List[Any] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : str = self.num_layers
lowerCamelCase__ : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
lowerCamelCase__ : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def lowerCAmelCase ( self : Tuple , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
lowerCamelCase__ : str = seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.num_layers
lowerCamelCase__ , lowerCamelCase__ : int = self.num_attention_heads
lowerCamelCase__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : Union[str, Any] = common_inputs["attention_mask"].dtype
lowerCamelCase__ : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
lowerCamelCase__ : Tuple = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
lowerCamelCase__ : str = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
lowerCamelCase__ : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
lowerCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
def lowerCAmelCase ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
lowerCamelCase__ : int = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
| 5 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
A : Union[str, Any] = logging.get_logger(__name__)
A : Union[str, Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
A : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowercase :
"""simple docstring"""
A__ = field(
default=lowercase__ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase__)})
A__ = field(
default=lowercase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
A__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
A__ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
A__ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
A__ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"})
A__ = field(
default=lowercase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
A__ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
A__ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
A__ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "train"
A__ = "dev"
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = 42
A__ = 42
A__ = 42
A__ = 42
def __init__( self : Optional[int] , __lowerCamelCase : SquadDataTrainingArguments , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Union[str, Split] = Split.train , __lowerCamelCase : Optional[bool] = False , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = "pt" , ):
'''simple docstring'''
lowerCamelCase__ : List[str] = args
lowerCamelCase__ : Tuple = is_language_sensitive
lowerCamelCase__ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
try:
lowerCamelCase__ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase__ : str = mode
# Load data features from cache or dataset file
lowerCamelCase__ : Any = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase__ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase__ : List[str] = cached_features_file + ".lock"
with FileLock(__lowerCamelCase ):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
lowerCamelCase__ : str = time.time()
lowerCamelCase__ : Tuple = torch.load(__lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase__ : Optional[Any] = self.old_features["features"]
lowerCamelCase__ : Optional[int] = self.old_features.get("dataset" , __lowerCamelCase )
lowerCamelCase__ : Optional[Any] = self.old_features.get("examples" , __lowerCamelCase )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
lowerCamelCase__ : List[Any] = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase__ : str = self.processor.get_train_examples(args.data_dir )
lowerCamelCase__ , lowerCamelCase__ : Tuple = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__lowerCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__lowerCamelCase , )
lowerCamelCase__ : int = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __lowerCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.features[i]
lowerCamelCase__ : Tuple = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase__ : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase__ : Any = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase__ : Union[str, Any] = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase__ : List[Any] = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase__ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 5 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCamelCase ( snake_case__ = "isbn/0140328726" ):
A_ : Tuple = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
A_ : Any = F"""{olid} is not a valid Open Library olid"""
raise ValueError(snake_case__ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def __UpperCamelCase ( snake_case__ ):
A_ : Any = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
A_ : Optional[int] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A_ : List[str] = [
get_openlibrary_data(author["""key"""] )["name"] for author in data["Authors"]
]
A_ : str = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A_ : List[Any] = ", ".join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowerCAmelCase = input("\nEnter the ISBN code to search (or \'quit\' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
_lowerCAmelCase = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print("\n".join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 180 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCAmelCase__( lowercase : str ) -> List[str]:
__snake_case : Any = int(lowercase )
__snake_case , __snake_case , __snake_case : List[str] = t // 3600, (t // 60) % 60, t % 60
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def lowerCAmelCase__( lowercase : Tuple , lowercase : List[Any] , lowercase : Any , lowercase : Tuple , lowercase : Dict=300 ) -> int:
# docstyle-ignore
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def lowerCAmelCase__( lowercase : Dict ) -> Union[str, Any]:
__snake_case : Any = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__snake_case : List[str] = f"""{elt:.6f}""" if isinstance(lowercase , lowercase ) else str(lowercase )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class _lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ : List[str] =5
UpperCAmelCase_ : Optional[int] =0.2
def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 300 , ) -> List[Any]:
'''simple docstring'''
__snake_case : Dict = total
__snake_case : List[Any] = "" if prefix is None else prefix
__snake_case : Any = leave
__snake_case : Optional[Any] = parent
__snake_case : Any = width
__snake_case : List[Any] = None
__snake_case : str = None
__snake_case : Union[str, Any] = None
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None ) -> Optional[Any]:
'''simple docstring'''
__snake_case : int = value
if comment is not None:
__snake_case : str = comment
if self.last_value is None:
__snake_case : Optional[Any] = time.time()
__snake_case : Union[str, Any] = value
__snake_case : Optional[int] = None
__snake_case : Optional[int] = self.warmup
__snake_case : Optional[int] = 1
self.update_bar(UpperCAmelCase )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__snake_case : List[str] = time.time()
__snake_case : Optional[Any] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
__snake_case : List[str] = self.elapsed_time / (value - self.start_value)
else:
__snake_case : str = None
if value >= self.total:
__snake_case : Union[str, Any] = self.total
__snake_case : Dict = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__snake_case : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(UpperCAmelCase )
__snake_case : str = value
__snake_case : Union[str, Any] = current_time
if self.average_time_per_item is None:
__snake_case : int = 1
else:
__snake_case : Any = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
__snake_case : List[str] = " " * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase )
if self.elapsed_time is None:
__snake_case : List[str] = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
__snake_case : int = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
__snake_case : List[str] = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
__snake_case : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__snake_case : List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase )
__snake_case : Optional[Any] = None if column_names is None else [column_names]
__snake_case : Union[str, Any] = None
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[int] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__snake_case : Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
if self.inner_table is None:
__snake_case : str = [list(values.keys() ), list(values.values() )]
else:
__snake_case : List[Any] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(UpperCAmelCase )
__snake_case : Any = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=300 ) -> str:
'''simple docstring'''
__snake_case : Union[str, Any] = NotebookProgressBar(UpperCAmelCase , prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase )
return self.child_bar
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case : List[str] = None
self.display()
class _lowerCamelCase ( a ):
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
__snake_case : List[str] = None
__snake_case : List[Any] = None
__snake_case : Dict = False
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__snake_case : str = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
__snake_case : Optional[Any] = 0
__snake_case : Tuple = 0
__snake_case : str = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss" )
__snake_case : int = NotebookTrainingTracker(state.max_steps , UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
'''simple docstring'''
__snake_case : List[str] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
__snake_case : Optional[Any] = False
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Any:
'''simple docstring'''
if not has_length(UpperCAmelCase ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__snake_case : List[str] = self.training_tracker.add_child(len(UpperCAmelCase ) )
else:
__snake_case : int = NotebookProgressBar(len(UpperCAmelCase ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
__snake_case : Optional[Any] = None
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__snake_case : int = {"Training Loss": logs["loss"]}
# First column is necessarily Step sine we're not in epoch eval strategy
__snake_case : Optional[Any] = state.global_step
self.training_tracker.write_line(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
if self.training_tracker is not None:
__snake_case : Optional[Any] = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history ):
if "loss" in log:
__snake_case : Optional[int] = log["loss"]
break
if self.first_column == "Epoch":
__snake_case : Union[str, Any] = int(state.epoch )
else:
__snake_case : List[Any] = state.global_step
__snake_case : Tuple = "eval"
for k in metrics:
if k.endswith("_loss" ):
__snake_case : Any = re.sub(r"\_loss$" , "" , UpperCAmelCase )
__snake_case : Dict = metrics.pop("total_flos" , UpperCAmelCase )
__snake_case : Optional[Any] = metrics.pop("epoch" , UpperCAmelCase )
__snake_case : int = metrics.pop(F"""{metric_key_prefix}_runtime""" , UpperCAmelCase )
__snake_case : List[str] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , UpperCAmelCase )
__snake_case : Union[str, Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , UpperCAmelCase )
__snake_case : Union[str, Any] = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , UpperCAmelCase )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
__snake_case : Tuple = v
else:
__snake_case : str = k.split("_" )
__snake_case : int = " ".join([part.capitalize() for part in splits[1:]] )
__snake_case : Optional[int] = v
self.training_tracker.write_line(UpperCAmelCase )
self.training_tracker.remove_child()
__snake_case : Optional[Any] = None
# Evaluation takes a long time so we should force the next update.
__snake_case : Any = True
def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=UpperCAmelCase )
__snake_case : Optional[Any] = None
| 243 | 0 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> str:
if b == 0:
return (1, 0)
(_lowercase) : Union[str, Any] = extended_euclid(__UpperCamelCase , a % b )
_lowercase : Optional[int] = a // b
return (y, x - k * y)
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
(_lowercase) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
_lowercase : Union[str, Any] = na * na
_lowercase : Union[str, Any] = ra * x * na + ra * y * na
return (n % m + m) % m
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> int:
(_lowercase) : List[str] = extended_euclid(__UpperCamelCase , __UpperCamelCase )
if b < 0:
_lowercase : Dict = (b % n + n) % n
return b
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Tuple:
_lowercase : Optional[int] = invert_modulo(__UpperCamelCase , __UpperCamelCase ), invert_modulo(__UpperCamelCase , __UpperCamelCase )
_lowercase : Union[str, Any] = na * na
_lowercase : Any = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 721 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE : str = "RegNetConfig"
# Base docstring
SCREAMING_SNAKE_CASE : Tuple = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE : Dict = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE : Optional[Any] = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE : List[str] = "tabby, tabby cat"
SCREAMING_SNAKE_CASE : Optional[int] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase = 3, lowerCamelCase = 1, lowerCamelCase = 1, lowerCamelCase = "relu", **lowerCamelCase, ) -> int:
"""simple docstring"""
super().__init__(**lowerCamelCase)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowercase : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
_lowercase : List[Any] = tf.keras.layers.ConvaD(
filters=lowerCamelCase, kernel_size=lowerCamelCase, strides=lowerCamelCase, padding='VALID', groups=lowerCamelCase, use_bias=lowerCamelCase, name='convolution', )
_lowercase : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
_lowercase : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = self.convolution(self.padding(lowerCamelCase))
_lowercase : int = self.normalization(lowerCamelCase)
_lowercase : Optional[Any] = self.activation(lowerCamelCase)
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Tuple = config.num_channels
_lowercase : Tuple = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )
def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = shape_list(lowerCamelCase)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowercase : Optional[Any] = tf.transpose(lowerCamelCase, perm=(0, 2, 3, 1))
_lowercase : int = self.embedder(lowerCamelCase)
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase = 2, **lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : List[Any] = tf.keras.layers.ConvaD(
filters=lowerCamelCase, kernel_size=1, strides=lowerCamelCase, use_bias=lowerCamelCase, name='convolution')
_lowercase : Optional[int] = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False) -> tf.Tensor:
"""simple docstring"""
return self.normalization(self.convolution(lowerCamelCase), training=lowerCamelCase)
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase, name='pooler')
_lowercase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase, kernel_size=1, activation='relu', name='attention.0'),
tf.keras.layers.ConvaD(filters=lowerCamelCase, kernel_size=1, activation='sigmoid', name='attention.2'),
]
def UpperCamelCase ( self, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[int] = self.pooler(lowerCamelCase)
for layer_module in self.attention:
_lowercase : Optional[int] = layer_module(lowerCamelCase)
_lowercase : Optional[int] = hidden_state * pooled
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 1, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Optional[Any] = in_channels != out_channels or stride != 1
_lowercase : List[Any] = max(1, out_channels // config.groups_width)
_lowercase : List[str] = (
TFRegNetShortCut(lowerCamelCase, stride=lowerCamelCase, name='shortcut')
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowercase : str = [
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=config.hidden_act, name='layer.0'),
TFRegNetConvLayer(
lowerCamelCase, stride=lowerCamelCase, groups=lowerCamelCase, activation=config.hidden_act, name='layer.1'),
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=lowerCamelCase, name='layer.2'),
]
_lowercase : Tuple = ACTaFN[config.hidden_act]
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = hidden_state
for layer_module in self.layers:
_lowercase : List[Any] = layer_module(lowerCamelCase)
_lowercase : Dict = self.shortcut(lowerCamelCase)
hidden_state += residual
_lowercase : Tuple = self.activation(lowerCamelCase)
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 1, **lowerCamelCase) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Dict = in_channels != out_channels or stride != 1
_lowercase : str = max(1, out_channels // config.groups_width)
_lowercase : Tuple = (
TFRegNetShortCut(lowerCamelCase, stride=lowerCamelCase, name='shortcut')
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut')
)
_lowercase : Union[str, Any] = [
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=config.hidden_act, name='layer.0'),
TFRegNetConvLayer(
lowerCamelCase, stride=lowerCamelCase, groups=lowerCamelCase, activation=config.hidden_act, name='layer.1'),
TFRegNetSELayer(lowerCamelCase, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
TFRegNetConvLayer(lowerCamelCase, kernel_size=1, activation=lowerCamelCase, name='layer.3'),
]
_lowercase : Any = ACTaFN[config.hidden_act]
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : Dict = hidden_state
for layer_module in self.layers:
_lowercase : Any = layer_module(lowerCamelCase)
_lowercase : Optional[int] = self.shortcut(lowerCamelCase)
hidden_state += residual
_lowercase : Optional[int] = self.activation(lowerCamelCase)
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 2, lowerCamelCase = 2, **lowerCamelCase) -> Tuple:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Any = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
_lowercase : Any = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase, lowerCamelCase, lowerCamelCase, stride=lowerCamelCase, name='layers.0'),
*[layer(lowerCamelCase, lowerCamelCase, lowerCamelCase, name=F'''layers.{i+1}''') for i in range(depth - 1)],
]
def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
"""simple docstring"""
for layer_module in self.layers:
_lowercase : Tuple = layer_module(lowerCamelCase)
return hidden_state
class _lowerCamelCase( tf.keras.layers.Layer ):
def __init__( self, lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : List[Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ))
_lowercase : str = zip(config.hidden_sizes, config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase, config.depths[1:])):
self.stages.append(TFRegNetStage(lowerCamelCase, lowerCamelCase, lowerCamelCase, depth=lowerCamelCase, name=F'''stages.{i+1}'''))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = False, lowerCamelCase = True) -> TFBaseModelOutputWithNoAttention:
"""simple docstring"""
_lowercase : int = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowercase : Optional[Any] = hidden_states + (hidden_state,)
_lowercase : Tuple = stage_module(lowerCamelCase)
if output_hidden_states:
_lowercase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase, hidden_states=lowerCamelCase)
@keras_serializable
class _lowerCamelCase( tf.keras.layers.Layer ):
lowercase_ : Dict = RegNetConfig
def __init__( self, lowerCamelCase, **lowerCamelCase) -> str:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : int = config
_lowercase : List[Any] = TFRegNetEmbeddings(lowerCamelCase, name='embedder')
_lowercase : List[Any] = TFRegNetEncoder(lowerCamelCase, name='encoder')
_lowercase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase, name='pooler')
@unpack_inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ) -> TFBaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_lowercase : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase : Dict = self.embedder(lowerCamelCase, training=lowerCamelCase)
_lowercase : int = self.encoder(
lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase)
_lowercase : Optional[int] = encoder_outputs[0]
_lowercase : List[Any] = self.pooler(lowerCamelCase)
# Change to NCHW output format have uniformity in the modules
_lowercase : Tuple = tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2))
_lowercase : Tuple = tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowercase : Union[str, Any] = tuple([tf.transpose(lowerCamelCase, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase, pooler_output=lowerCamelCase, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class _lowerCamelCase( _a ):
lowercase_ : List[str] = RegNetConfig
lowercase_ : Optional[int] = """regnet"""
lowercase_ : int = """pixel_values"""
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24), dtype=tf.floataa)}
SCREAMING_SNAKE_CASE : Union[str, Any] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
SCREAMING_SNAKE_CASE : Tuple = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""", _a, )
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
super().__init__(lowerCamelCase, *lowerCamelCase, **lowerCamelCase)
_lowercase : List[Any] = TFRegNetMainLayer(lowerCamelCase, name='regnet')
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowerCamelCase, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
"""simple docstring"""
_lowercase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase : str = self.regnet(
pixel_values=lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""", _a, )
class _lowerCamelCase( _a, _a ):
def __init__( self, lowerCamelCase, *lowerCamelCase, **lowerCamelCase) -> Any:
"""simple docstring"""
super().__init__(lowerCamelCase, *lowerCamelCase, **lowerCamelCase)
_lowercase : Dict = config.num_labels
_lowercase : Optional[int] = TFRegNetMainLayer(lowerCamelCase, name='regnet')
# classification head
_lowercase : Tuple = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowerCamelCase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def UpperCamelCase ( self, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase=False, ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
"""simple docstring"""
_lowercase : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowercase : Dict = self.regnet(
lowerCamelCase, output_hidden_states=lowerCamelCase, return_dict=lowerCamelCase, training=lowerCamelCase)
_lowercase : List[str] = outputs.pooler_output if return_dict else outputs[1]
_lowercase : Union[str, Any] = self.classifier[0](lowerCamelCase)
_lowercase : List[Any] = self.classifier[1](lowerCamelCase)
_lowercase : Optional[Any] = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase, logits=lowerCamelCase)
if not return_dict:
_lowercase : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase, logits=lowerCamelCase, hidden_states=outputs.hidden_states)
| 354 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : int = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XGLMTokenizer
UpperCamelCase = XGLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ = XGLMTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = '<pad>'
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def a__ ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(A_ ) , 1008 )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = XGLMTokenizer(A_ , keep_accents=A_ )
lowerCamelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def a__ ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A_ , f.name )
lowerCamelCase_ = XGLMTokenizer(f.name , keep_accents=A_ )
lowerCamelCase_ = pickle.dumps(A_ )
pickle.loads(A_ )
def a__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'I was born in 92000, and this is falsé.'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = 'Hello World!'
lowerCamelCase_ = [2, 31227, 4447, 35]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowerCamelCase_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='facebook/xglm-564M' , padding=A_ , )
| 70 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Dict , A_ : int=7 , A_ : Any=3 , A_ : List[str]=30 , A_ : Union[str, Any]=400 , A_ : List[str]=True , A_ : int=None , A_ : Any=True , A_ : str=1 / 255 , A_ : int=True , A_ : List[Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=True , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : Any=False ) -> Union[str, Any]:
"""simple docstring"""
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(A_ , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(A_ , key=lambda A_ : item[0] )[0]
lowerCamelCase_ = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = DetrImageProcessor if is_vision_available() else None
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_rescale' ) )
self.assertTrue(hasattr(A_ , 'rescale_factor' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , A_ )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
@slow
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
| 70 | 1 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__SCREAMING_SNAKE_CASE :List[str] = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
__SCREAMING_SNAKE_CASE :Tuple = dataset.iloc[:, 1:2].values
__SCREAMING_SNAKE_CASE :Optional[Any] = dataset.iloc[:, 2].values
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = train_test_split(X, y, test_size=0.2, random_state=0)
__SCREAMING_SNAKE_CASE :Any = PolynomialFeatures(degree=4)
__SCREAMING_SNAKE_CASE :Optional[int] = poly_reg.fit_transform(X)
__SCREAMING_SNAKE_CASE :Tuple = LinearRegression()
pol_reg.fit(X_poly, y)
def UpperCAmelCase_ ( ) -> str:
'''simple docstring'''
plt.scatter(__lowercase , __lowercase , color="red" )
plt.plot(__lowercase , pol_reg.predict(poly_reg.fit_transform(__lowercase ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 119 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCAmelCase_ ( __lowercase : str ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , __lowercase ).groups()[0]
class A_ ( lowerCAmelCase_ ):
def __init__( self : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int=None , snake_case_ : List[Any]=None ):
_UpperCAmelCase = file_names
_UpperCAmelCase = image_transform
_UpperCAmelCase = label_to_id
def __len__( self : Union[str, Any] ):
return len(self.file_names )
def __getitem__( self : Any , snake_case_ : Tuple ):
_UpperCAmelCase = self.file_names[idx]
_UpperCAmelCase = PIL.Image.open(snake_case_ )
_UpperCAmelCase = raw_image.convert("RGB" )
if self.image_transform is not None:
_UpperCAmelCase = self.image_transform(snake_case_ )
_UpperCAmelCase = extract_label(snake_case_ )
if self.label_to_id is not None:
_UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Dict ) -> Optional[int]:
'''simple docstring'''
if args.with_tracking:
_UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
_UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["lr"]
_UpperCAmelCase = int(config["num_epochs"] )
_UpperCAmelCase = int(config["seed"] )
_UpperCAmelCase = int(config["batch_size"] )
_UpperCAmelCase = config["image_size"]
if not isinstance(__lowercase , (list, tuple) ):
_UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
_UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
_UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_UpperCAmelCase = os.path.split(__lowercase )[-1].split("." )[0]
accelerator.init_trackers(__lowercase , __lowercase )
# Grab all the image filenames
_UpperCAmelCase = [os.path.join(args.data_dir , __lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
_UpperCAmelCase = [extract_label(__lowercase ) for fname in file_names]
_UpperCAmelCase = list(set(__lowercase ) )
id_to_label.sort()
_UpperCAmelCase = {lbl: i for i, lbl in enumerate(__lowercase )}
# Set the seed before splitting the data.
np.random.seed(__lowercase )
torch.manual_seed(__lowercase )
torch.cuda.manual_seed_all(__lowercase )
# Split our filenames between train and validation
_UpperCAmelCase = np.random.permutation(len(__lowercase ) )
_UpperCAmelCase = int(0.8 * len(__lowercase ) )
_UpperCAmelCase = random_perm[:cut]
_UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_UpperCAmelCase = Compose([RandomResizedCrop(__lowercase , scale=(0.5, 1.0) ), ToTensor()] )
_UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__lowercase , label_to_id=__lowercase )
# For evaluation, we use a deterministic Resize
_UpperCAmelCase = Compose([Resize(__lowercase ), ToTensor()] )
_UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__lowercase , label_to_id=__lowercase )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 )
_UpperCAmelCase = DataLoader(__lowercase , shuffle=__lowercase , batch_size=__lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = create_model("resnet50d" , pretrained=__lowercase , num_classes=len(__lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_UpperCAmelCase = False
for param in model.get_classifier().parameters():
_UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
_UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
_UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_UpperCAmelCase = OneCycleLR(optimizer=__lowercase , max_lr=__lowercase , epochs=__lowercase , steps_per_epoch=len(__lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
_UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_UpperCAmelCase = os.path.splitext(__lowercase )[0]
if "epoch" in training_difference:
_UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
_UpperCAmelCase = None
else:
_UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
_UpperCAmelCase = resume_step // len(__lowercase )
resume_step -= starting_epoch * len(__lowercase )
# Now we train the model
for epoch in range(__lowercase , __lowercase ):
model.train()
if args.with_tracking:
_UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_UpperCAmelCase = accelerator.skip_first_batches(__lowercase , __lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCAmelCase = (batch["image"] - mean) / std
_UpperCAmelCase = model(__lowercase )
_UpperCAmelCase = torch.nn.functional.cross_entropy(__lowercase , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__lowercase , __lowercase ):
_UpperCAmelCase = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_UpperCAmelCase = os.path.join(args.output_dir , __lowercase )
accelerator.save_state(__lowercase )
model.eval()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for step, batch in enumerate(__lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
_UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
_UpperCAmelCase = model(__lowercase )
_UpperCAmelCase = outputs.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
_UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__lowercase ),
"epoch": epoch,
} , step=__lowercase , )
if checkpointing_steps == "epoch":
_UpperCAmelCase = f'epoch_{epoch}'
if args.output_dir is not None:
_UpperCAmelCase = os.path.join(args.output_dir , __lowercase )
accelerator.save_state(__lowercase )
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase_ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=__lowercase , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__lowercase , default=__lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__lowercase , default=__lowercase , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__lowercase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__lowercase , default=__lowercase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__lowercase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"lr": 3E-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__lowercase , __lowercase )
if __name__ == "__main__":
main()
| 119 | 1 |
"""simple docstring"""
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 0, __lowerCamelCase = 0 ):
"""simple docstring"""
_lowerCAmelCase = right or len(__lowerCamelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__lowerCamelCase, __lowerCamelCase, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 589 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int | float | str , SCREAMING_SNAKE_CASE : int | float | str ):
if nth_term == "":
return [""]
UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
UpperCAmelCase = []
for temp in range(int(SCREAMING_SNAKE_CASE ) ):
series.append(f'''1 / {pow(temp + 1 , int(SCREAMING_SNAKE_CASE ) )}''' if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : List[Any] = int(input('Enter the last number (nth term) of the P-Series'))
_a : Tuple = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
| 447 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( A__ ):
if not isinstance(A__ , A__ ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
a_ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 511 |
'''simple docstring'''
def UpperCamelCase_ ( A__ ):
if n_term == "":
return []
a_ = []
for temp in range(int(A__ ) ):
series.append(F'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
lowercase__ =input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 511 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class UpperCAmelCase ( unittest.TestCase ):
def lowercase__ ( self : str ) -> Tuple:
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=__snake_case , )
assert hasattr(self , """env""" )
def lowercase__ ( self : Tuple , __snake_case : Union[str, Any] ) -> Any:
# configuration for running training on smdistributed Model Parallel
_lowerCAmelCase = {
"""enabled""": True,
"""processes_per_host""": 8,
}
_lowerCAmelCase = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
_lowerCAmelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
_lowerCAmelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__snake_case , instance_type=self.instance_type , debugger_hook_config=__snake_case , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=__snake_case , py_version="""py36""" , )
def lowercase__ ( self : List[Any] , __snake_case : str ) -> Dict:
TrainingJobAnalytics(__snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(1,)] )
def lowercase__ ( self : Any , __snake_case : List[str] ) -> Dict:
# create estimator
_lowerCAmelCase = self.create_estimator(__snake_case )
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
_lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __snake_case )
| 207 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : int ={
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =[
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
A__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 207 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=0 , ):
"""simple docstring"""
UpperCamelCase : Tuple = parent
UpperCamelCase : str = batch_size
UpperCamelCase : Dict = seq_length
UpperCamelCase : List[Any] = is_training
UpperCamelCase : List[Any] = use_input_mask
UpperCamelCase : List[Any] = use_token_type_ids
UpperCamelCase : Any = use_labels
UpperCamelCase : str = vocab_size
UpperCamelCase : List[Any] = hidden_size
UpperCamelCase : List[str] = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : List[str] = hidden_act
UpperCamelCase : List[str] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : List[str] = max_position_embeddings
UpperCamelCase : Any = type_vocab_size
UpperCamelCase : Dict = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : List[str] = num_labels
UpperCamelCase : Union[str, Any] = num_choices
UpperCamelCase : Any = scope
UpperCamelCase : List[Any] = projection_dim
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Union[str, Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Dict = None
if self.use_token_type_ids:
UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase : Optional[int] = None
UpperCamelCase : Tuple = None
UpperCamelCase : Optional[int] = None
if self.use_labels:
UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : Union[str, Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
UpperCamelCase : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = TFDPRContextEncoder(config=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : str = TFDPRQuestionEncoder(config=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = TFDPRReader(config=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : int = config_and_inputs
UpperCamelCase : str = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : int = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Any = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
__UpperCamelCase : Dict = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[int] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = TFDPRModelTester(self )
UpperCamelCase : List[str] = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Optional[int] = TFDPRContextEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : int = TFDPRQuestionEncoder.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDPRReader.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
UpperCamelCase : Tuple = tf.constant(
[[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
UpperCamelCase : Tuple = model(__SCREAMING_SNAKE_CASE )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
UpperCamelCase : Optional[Any] = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 643 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : List[Any] = True
except ImportError:
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def a ( SCREAMING_SNAKE_CASE_ : Namespace ):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@staticmethod
def _lowercase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__SCREAMING_SNAKE_CASE , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__SCREAMING_SNAKE_CASE , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = testing
UpperCamelCase : Any = testing_file
UpperCamelCase : Dict = path
def _lowercase ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCamelCase : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCamelCase : Dict = (
Path(__SCREAMING_SNAKE_CASE ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCamelCase : List[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__SCREAMING_SNAKE_CASE ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__SCREAMING_SNAKE_CASE , extra_context=__SCREAMING_SNAKE_CASE , )
UpperCamelCase : Dict = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCamelCase : Tuple = json.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = configuration['''lowercase_modelname''']
UpperCamelCase : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"""{directory}/configuration.json""" )
UpperCamelCase : str = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Any = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Union[str, Any] = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCamelCase : Optional[Any] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=__SCREAMING_SNAKE_CASE )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , '''w''' ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
UpperCamelCase : Any = f.readlines()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__SCREAMING_SNAKE_CASE )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# Create temp file
UpperCamelCase , UpperCamelCase : Optional[Any] = mkstemp()
UpperCamelCase : Tuple = False
with fdopen(__SCREAMING_SNAKE_CASE , '''w''' ) as new_file:
with open(__SCREAMING_SNAKE_CASE ) as old_file:
for line in old_file:
new_file.write(__SCREAMING_SNAKE_CASE )
if line_to_copy_below in line:
UpperCamelCase : Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(__SCREAMING_SNAKE_CASE )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Remove original file
remove(__SCREAMING_SNAKE_CASE )
# Move new file
move(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def skip_units(__SCREAMING_SNAKE_CASE ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE ) as datafile:
UpperCamelCase : int = []
UpperCamelCase : Dict = False
UpperCamelCase : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : int = skip_units(__SCREAMING_SNAKE_CASE )
elif "# Below: " in line and "##" not in line:
UpperCamelCase : Dict = line.split('''"''' )[1]
UpperCamelCase : List[str] = skip_units(__SCREAMING_SNAKE_CASE )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = []
elif "# Replace with" in line and "##" not in line:
UpperCamelCase : Tuple = []
elif "##" not in line:
lines_to_copy.append(__SCREAMING_SNAKE_CASE )
remove(__SCREAMING_SNAKE_CASE )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(__SCREAMING_SNAKE_CASE )
| 643 | 1 |
UpperCamelCase : int = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 37 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Union[str, Any] = '''convbert'''
def __init__( self , a_=30522 , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=2 , a_=0.02 , a_=1e-12 , a_=1 , a_=0 , a_=2 , a_=768 , a_=2 , a_=9 , a_=1 , a_=None , **a_ , ) -> Tuple:
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase ):
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 657 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Tuple = {"""vocab_file""": """spiece.model"""}
a : Optional[Any] = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
a : Dict = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = []
def __init__( self , A , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , A = None , **A , ) -> None:
UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
UpperCAmelCase : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , mask_token=A , cls_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def _lowercase( self ) -> Optional[int]:
return self.sp_model.get_piece_size()
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
UpperCAmelCase : int = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , A ) -> int:
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : int = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def _lowercase( self , A ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Dict = self.sp_model.IdToPiece(A )
return token
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : int = []
UpperCAmelCase : int = """"""
UpperCAmelCase : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(A )
UpperCAmelCase : Dict = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def _lowercase( self , A , A = False , A = None , A = True , **A , ) -> str:
UpperCAmelCase : Tuple = kwargs.pop("""use_source_tokenizer""" , A )
UpperCAmelCase : str = self.convert_ids_to_tokens(A , skip_special_tokens=A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase : Tuple = []
UpperCAmelCase : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
UpperCAmelCase : int = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase : List[Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(A ) )
else:
UpperCAmelCase : str = """""".join(A )
UpperCAmelCase : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase : List[Any] = self.clean_up_tokenization(A )
return clean_text
else:
return text
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : int = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 672 | 1 |
from __future__ import annotations
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | def __UpperCAmelCase ( UpperCAmelCase = 50 )-> int:
"""simple docstring"""
lowercase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"{solution() = }")
| 604 | 0 |
from __future__ import annotations
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = str(_lowercase )
return n == n[::-1]
def A ( _lowercase = 1_000_000 ):
SCREAMING_SNAKE_CASE : int = 0
for i in range(1 , _lowercase ):
if is_palindrome(_lowercase ) and is_palindrome(bin(_lowercase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 34 | import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 34 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase__ = False
class __magic_name__ (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> List[str]:
lowerCAmelCase_ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe.dual_guided(
prompt="first prompt" , image=_a , text_to_image_strength=0.7_5 , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
lowerCAmelCase_ = VersatileDiffusionPipeline.from_pretrained(_a , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = generator.manual_seed(0 )
lowerCAmelCase_ = pipe.dual_guided(
prompt="first prompt" , image=_a , text_to_image_strength=0.7_5 , generator=_a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __a ( self ) -> Dict:
lowerCAmelCase_ = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = "cyberpunk 2077"
lowerCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe.dual_guided(
prompt=_a , image=_a , text_to_image_strength=0.7_5 , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase_ = "A painting of a squirrel eating a burger "
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe.text_to_image(
prompt=_a , generator=_a , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCAmelCase_ = pipe.image_variation(_a , generator=_a , output_type="numpy" ).images
lowerCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 122 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122 | 1 |
from __future__ import annotations
def lowercase_ (A : list[int] , A : int ):
if len(A ) < k or k < 0:
raise ValueError('Invalid Input' )
snake_case__ : Optional[Any] = sum(array[:k] )
for i in range(len(A ) - k ):
snake_case__ : Any = current_sum - array[i] + array[i + k]
snake_case__ : int = max(A , A )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
a_ :str = [randint(-1_000, 1_000) for i in range(100)]
a_ :str = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 713 |
from ..utils import DummyObject, requires_backends
class snake_case__ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""note_seq"""]
def __init__( self : List[Any], *_snake_case : Dict, **_snake_case : List[Any] ) ->Optional[int]:
requires_backends(self, ['note_seq'] )
@classmethod
def lowercase_ ( cls : int, *_snake_case : Tuple, **_snake_case : Optional[Any] ) ->int:
requires_backends(cls, ['note_seq'] )
@classmethod
def lowercase_ ( cls : Optional[int], *_snake_case : str, **_snake_case : Tuple ) ->int:
requires_backends(cls, ['note_seq'] )
| 243 | 0 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : float
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
def __UpperCAmelCase ( __a : TreeNode | None ) -> bool:
"""simple docstring"""
def is_valid_tree(__a : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__a ,__a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
__a : TreeNode | None ,__a : float ,__a : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,__a ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,__a )
)
return is_binary_search_tree_recursive_check(__a ,-float('''inf''' ) ,float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Tuple=7 ,__SCREAMING_SNAKE_CASE : Tuple=3 ,__SCREAMING_SNAKE_CASE : str=3_0 ,__SCREAMING_SNAKE_CASE : Any=4_0_0 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=True ,__SCREAMING_SNAKE_CASE : Optional[int]=None ,__SCREAMING_SNAKE_CASE : List[str]=True ,__SCREAMING_SNAKE_CASE : Tuple=1 / 2_5_5 ,__SCREAMING_SNAKE_CASE : List[Any]=True ,__SCREAMING_SNAKE_CASE : Tuple=[0.5, 0.5, 0.5] ,__SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] ,__SCREAMING_SNAKE_CASE : Tuple=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_pad
def _UpperCAmelCase ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Dict=False ):
if not batched:
UpperCAmelCase = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE ,Image.Image ):
UpperCAmelCase , UpperCAmelCase = image.size
else:
UpperCAmelCase , UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase = self.size["shortest_edge"]
UpperCAmelCase = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase = self.size["shortest_edge"]
UpperCAmelCase = self.size["shortest_edge"]
else:
UpperCAmelCase = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase = max(__SCREAMING_SNAKE_CASE ,key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCAmelCase = max(__SCREAMING_SNAKE_CASE ,key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __magic_name__ ( _a , unittest.TestCase):
_UpperCAmelCase : Union[str, Any] = DetrImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase = DetrImageProcessingTester(self )
@property
def _UpperCAmelCase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"image_mean" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"image_std" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"do_normalize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"do_rescale" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"rescale_factor" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"do_resize" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"size" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"do_pad" ) )
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict ,size=4_2 ,max_size=8_4 ,pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size ,{"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : Any ):
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ,batched=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _UpperCAmelCase ( self : Optional[int] ):
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
UpperCAmelCase = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ,batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _UpperCAmelCase ( self : int ):
# Initialize image_processing
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__SCREAMING_SNAKE_CASE ,torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
UpperCAmelCase = image_processing(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ,batched=__SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def _UpperCAmelCase ( self : Tuple ):
# prepare image and target
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" ,"r" ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
UpperCAmelCase = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
UpperCAmelCase = image_processing(images=__SCREAMING_SNAKE_CASE ,annotations=__SCREAMING_SNAKE_CASE ,return_tensors="pt" )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
# verify area
UpperCAmelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] ,__SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] ,__SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] ,__SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCAmelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] ,__SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] ,__SCREAMING_SNAKE_CASE ) )
# verify size
UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] ,__SCREAMING_SNAKE_CASE ) )
@slow
def _UpperCAmelCase ( self : Any ):
# prepare image, target and masks_path
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" ,"r" ) as f:
UpperCAmelCase = json.loads(f.read() )
UpperCAmelCase = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
UpperCAmelCase = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
UpperCAmelCase = image_processing(images=__SCREAMING_SNAKE_CASE ,annotations=__SCREAMING_SNAKE_CASE ,masks_path=__SCREAMING_SNAKE_CASE ,return_tensors="pt" )
# verify pixel values
UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
# verify area
UpperCAmelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] ,__SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# verify image_id
UpperCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] ,__SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] ,__SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] ,__SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCAmelCase = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() ,__SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] ,__SCREAMING_SNAKE_CASE ) )
# verify size
UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] ,__SCREAMING_SNAKE_CASE ) )
| 405 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__lowerCAmelCase ={
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__lowerCAmelCase =logging.get_logger(__name__)
class __magic_name__ ( _a):
_UpperCAmelCase : Tuple = 'maskformer'
_UpperCAmelCase : Dict = {'hidden_size': 'mask_feature_size'}
_UpperCAmelCase : Dict = ['resnet', 'swin']
_UpperCAmelCase : Tuple = ['detr']
def __init__( self : Dict ,__SCREAMING_SNAKE_CASE : int = 2_5_6 ,__SCREAMING_SNAKE_CASE : int = 2_5_6 ,__SCREAMING_SNAKE_CASE : float = 0.1 ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : Optional[Dict] = None ,__SCREAMING_SNAKE_CASE : Optional[Dict] = None ,__SCREAMING_SNAKE_CASE : float = 0.02 ,__SCREAMING_SNAKE_CASE : float = 1.0 ,__SCREAMING_SNAKE_CASE : float = 1.0 ,__SCREAMING_SNAKE_CASE : float = 1.0 ,__SCREAMING_SNAKE_CASE : float = 20.0 ,__SCREAMING_SNAKE_CASE : Optional[bool] = None ,**__SCREAMING_SNAKE_CASE : Union[str, Any] ,):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase = SwinConfig(
image_size=3_8_4 ,in_channels=3 ,patch_size=4 ,embed_dim=1_2_8 ,depths=[2, 2, 1_8, 2] ,num_heads=[4, 8, 1_6, 3_2] ,window_size=1_2 ,drop_path_rate=0.3 ,out_features=["stage1", "stage2", "stage3", "stage4"] ,)
if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
UpperCAmelCase = backbone_config.pop("model_type" )
UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase = config_class.from_dict(__SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase = (
decoder_config.pop("model_type" ) if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
UpperCAmelCase = CONFIG_MAPPING[decoder_type]
UpperCAmelCase = config_class.from_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = backbone_config
UpperCAmelCase = decoder_config
# main feature dimension for the model
UpperCAmelCase = fpn_feature_size
UpperCAmelCase = mask_feature_size
# initializer
UpperCAmelCase = init_std
UpperCAmelCase = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase = cross_entropy_weight
UpperCAmelCase = dice_weight
UpperCAmelCase = mask_weight
UpperCAmelCase = use_auxiliary_loss
UpperCAmelCase = no_object_weight
UpperCAmelCase = output_auxiliary_logits
UpperCAmelCase = self.decoder_config.encoder_attention_heads
UpperCAmelCase = self.decoder_config.num_hidden_layers
super().__init__(**__SCREAMING_SNAKE_CASE )
@classmethod
def _UpperCAmelCase ( cls : Optional[Any] ,__SCREAMING_SNAKE_CASE : PretrainedConfig ,__SCREAMING_SNAKE_CASE : PretrainedConfig ,**__SCREAMING_SNAKE_CASE : str ):
return cls(
backbone_config=__SCREAMING_SNAKE_CASE ,decoder_config=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
def _UpperCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.backbone_config.to_dict()
UpperCAmelCase = self.decoder_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
| 405 | 1 |
from __future__ import annotations
def _a ( UpperCAmelCase ) -> list[int]: # This function is recursive
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = len(a_ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase__ : List[str] = array[0]
lowerCamelCase__ : Any = False
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Union[str, Any] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase__ : Optional[Any] = longest_subsequence(a_ )
if len(a_ ) > len(a_ ):
lowerCamelCase__ : Optional[Any] = temp_array
else:
i += 1
lowerCamelCase__ : List[str] = [element for element in array[1:] if element >= pivot]
lowerCamelCase__ : Optional[int] = [pivot, *longest_subsequence(a_ )]
if len(a_ ) > len(a_ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 315 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class lowercase__( _UpperCAmelCase ):
'''simple docstring'''
UpperCamelCase = """mra"""
def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : List[str] = block_per_row
SCREAMING_SNAKE_CASE : Optional[int] = approx_mode
SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
| 698 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
SCREAMING_SNAKE_CASE__ : Tuple =['bert-base-uncased', 'bert-base-cased']
SCREAMING_SNAKE_CASE__ : Optional[int] ='hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class _UpperCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , _lowercase ) -> List[Any]:
super().__init__()
_lowerCamelCase : Union[str, Any] = tokenizer
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_lowercase )
_lowerCamelCase : Any = TFAutoModel.from_config(_lowercase )
def a__ ( self , _lowercase ) -> str:
_lowerCamelCase : Union[str, Any] = self.tokenizer(_lowercase )
_lowerCamelCase : Optional[Any] = self.bert(**_lowercase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCamelCase : int = [
BertTokenizer.from_pretrained(_lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCamelCase : str = [TFBertTokenizer.from_pretrained(_lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_lowercase , use_fast_bert_tokenizer=_lowercase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCamelCase : str = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
_lowerCamelCase : int = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def a__ ( self ) -> List[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCamelCase : List[Any] = tokenizer(_lowercase , return_tensors='''tf''' , padding='''longest''' )
_lowerCamelCase : Tuple = tf_tokenizer(_lowercase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def a__ ( self ) -> int:
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : str = tf_tokenizer(self.paired_sentences )
_lowerCamelCase : List[str] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def a__ ( self ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Optional[Any] = tf.function(_lowercase )
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCamelCase : List[str] = tf.constant(_lowercase )
_lowerCamelCase : int = compiled_tokenizer(_lowercase )
_lowerCamelCase : Optional[int] = tf_tokenizer(_lowercase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def a__ ( self ) -> Optional[int]:
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Dict = ModelToSave(tokenizer=_lowercase )
_lowerCamelCase : Tuple = tf.convert_to_tensor(self.test_sentences )
_lowerCamelCase : int = model(_lowercase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCamelCase : Optional[Any] = Path(_lowercase ) / '''saved.model'''
model.save(_lowercase )
_lowerCamelCase : Union[str, Any] = tf.keras.models.load_model(_lowercase )
_lowerCamelCase : List[str] = loaded_model(_lowercase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 721 | """simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = False , ) -> Tuple:
super().__init__()
_lowerCamelCase : Tuple = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Dict = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = nn.Dropout(p=_lowercase )
_lowerCamelCase : List[Any] = TaConfig(
vocab_size=_lowercase , d_model=_lowercase , num_heads=_lowercase , d_kv=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , feed_forward_proj=_lowercase , is_decoder=_lowercase , is_encoder_decoder=_lowercase , )
_lowerCamelCase : List[Any] = nn.ModuleList()
for lyr_num in range(_lowercase ):
_lowerCamelCase : Tuple = TaBlock(_lowercase )
self.encoders.append(_lowercase )
_lowerCamelCase : str = TaLayerNorm(_lowercase )
_lowerCamelCase : List[Any] = nn.Dropout(p=_lowercase )
def a__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
_lowerCamelCase : List[Any] = self.token_embedder(_lowercase )
_lowerCamelCase : Union[str, Any] = encoder_input_tokens.shape[1]
_lowerCamelCase : int = torch.arange(_lowercase , device=encoder_input_tokens.device )
x += self.position_encoding(_lowercase )
_lowerCamelCase : Tuple = self.dropout_pre(_lowercase )
# inverted the attention mask
_lowerCamelCase : int = encoder_input_tokens.size()
_lowerCamelCase : Union[str, Any] = self.get_extended_attention_mask(_lowercase , _lowercase )
for lyr in self.encoders:
_lowerCamelCase : List[Any] = lyr(_lowercase , _lowercase )[0]
_lowerCamelCase : str = self.layer_norm(_lowercase )
return self.dropout_post(_lowercase ), encoder_inputs_mask
| 558 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCAmelCase_ = TypeVar('''T''')
class __magic_name__ ( Generic[T] ):
"""simple docstring"""
lowerCAmelCase : deque[T] # Cache store of keys
lowerCAmelCase : set[T] # References of the keys in cache
lowerCAmelCase : int = 1_0 # Maximum capacity of cache
def __init__( self : Optional[Any] , _lowercase : int ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = deque()
_UpperCamelCase: Tuple = set()
if not n:
_UpperCamelCase: int = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
_UpperCamelCase: Optional[Any] = n
def lowerCAmelCase ( self : Optional[Any] , _lowercase : T ):
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_UpperCamelCase: Optional[Any] = self.dq_store.pop()
self.key_reference.remove(_lowercase )
else:
self.dq_store.remove(_lowercase )
self.dq_store.appendleft(_lowercase )
self.key_reference.add(_lowercase )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for k in self.dq_store:
print(_lowercase )
def __repr__( self : Any ):
"""simple docstring"""
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 271 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __magic_name__ :
"""simple docstring"""
lowerCAmelCase : Optional[int] = LEDConfig
lowerCAmelCase : List[str] = {}
lowerCAmelCase : str = '''gelu'''
def __init__( self : int , _lowercase : Tuple , _lowercase : Dict=13 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[int]=True , _lowercase : Any=False , _lowercase : Optional[int]=99 , _lowercase : Dict=32 , _lowercase : Optional[Any]=2 , _lowercase : Union[str, Any]=4 , _lowercase : List[str]=37 , _lowercase : Optional[Any]=0.1 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]=20 , _lowercase : Tuple=2 , _lowercase : str=1 , _lowercase : Optional[int]=0 , _lowercase : List[str]=4 , ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = parent
_UpperCamelCase: Union[str, Any] = batch_size
_UpperCamelCase: Optional[Any] = seq_length
_UpperCamelCase: str = is_training
_UpperCamelCase: Dict = use_labels
_UpperCamelCase: List[Any] = vocab_size
_UpperCamelCase: Dict = hidden_size
_UpperCamelCase: int = num_hidden_layers
_UpperCamelCase: Dict = num_attention_heads
_UpperCamelCase: Dict = intermediate_size
_UpperCamelCase: Union[str, Any] = hidden_dropout_prob
_UpperCamelCase: Any = attention_probs_dropout_prob
_UpperCamelCase: Optional[Any] = max_position_embeddings
_UpperCamelCase: Union[str, Any] = eos_token_id
_UpperCamelCase: Union[str, Any] = pad_token_id
_UpperCamelCase: Optional[Any] = bos_token_id
_UpperCamelCase: Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase: Any = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase: Tuple = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase: Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase: Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase: Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase: Dict = prepare_led_inputs_dict(_lowercase , _lowercase , _lowercase )
_UpperCamelCase: Tuple = tf.concat(
[tf.zeros_like(_lowercase )[:, :-1], tf.ones_like(_lowercase )[:, -1:]] , axis=-1 , )
_UpperCamelCase: List[Any] = global_attention_mask
return config, inputs_dict
def lowerCAmelCase ( self : Dict , _lowercase : List[str] , _lowercase : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = TFLEDModel(config=_lowercase ).get_decoder()
_UpperCamelCase: Optional[int] = inputs_dict['''input_ids''']
_UpperCamelCase: Dict = input_ids[:1, :]
_UpperCamelCase: Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
_UpperCamelCase: Optional[int] = 1
# first forward pass
_UpperCamelCase: Optional[Any] = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
_UpperCamelCase , _UpperCamelCase: Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase: Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase: List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase: Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase: Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase: str = model(_lowercase , attention_mask=_lowercase )[0]
_UpperCamelCase: Optional[int] = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase: Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase: Any = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase: Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-3 )
def lowerCAmelCase_ ( lowercase: Tuple , lowercase: List[str] , lowercase: List[str] , lowercase: int=None , lowercase: int=None , lowercase: List[str]=None , lowercase: List[str]=None , ) -> List[str]:
'''simple docstring'''
if attention_mask is None:
_UpperCamelCase: Optional[Any] = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase: int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCamelCase: str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __magic_name__ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCAmelCase : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase : Union[str, Any] = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase : str = True
lowerCAmelCase : Any = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
_UpperCamelCase: Dict = TFLEDModelTester(self )
_UpperCamelCase: Any = ConfigTester(self , config_class=_lowercase )
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
def lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase: str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase: str = tf.zeros_like(inputs_dict['''attention_mask'''] )
_UpperCamelCase: Any = 2
_UpperCamelCase: List[str] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_UpperCamelCase: int = True
_UpperCamelCase: str = self.model_tester.seq_length
_UpperCamelCase: Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowercase : Union[str, Any] ):
_UpperCamelCase: Tuple = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowercase : int ):
_UpperCamelCase: str = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase: Any = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase: Optional[int] = True
_UpperCamelCase: List[Any] = False
_UpperCamelCase: Optional[int] = False
_UpperCamelCase: Union[str, Any] = model_class(_lowercase )
_UpperCamelCase: Optional[int] = model(self._prepare_for_class(_lowercase , _lowercase ) )
_UpperCamelCase: List[str] = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
_UpperCamelCase: Optional[int] = model_class(_lowercase )
_UpperCamelCase: Union[str, Any] = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase: Union[str, Any] = True
_UpperCamelCase: List[str] = model_class(_lowercase )
_UpperCamelCase: Dict = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
_UpperCamelCase: str = True
_UpperCamelCase: Dict = True
_UpperCamelCase: int = model_class(_lowercase )
_UpperCamelCase: Optional[int] = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( lowercase: Tuple ) -> List[str]:
'''simple docstring'''
return tf.constant(lowercase , dtype=tf.intaa )
UpperCAmelCase_ = 1E-4
@slow
@require_tf
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_UpperCamelCase: List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_UpperCamelCase: int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_UpperCamelCase: Union[str, Any] = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
_UpperCamelCase: Optional[Any] = model(**_lowercase )[0]
_UpperCamelCase: Optional[int] = (1, 1_024, 768)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
_UpperCamelCase: Optional[Any] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_UpperCamelCase: Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_UpperCamelCase: List[str] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_UpperCamelCase: Tuple = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
_UpperCamelCase: Any = model(**_lowercase )[0]
_UpperCamelCase: Dict = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
_UpperCamelCase: Optional[Any] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 , rtol=1E-3 ) | 271 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
a = 10
a = 256
def _lowercase ( lowercase__ ):
if len(_UpperCAmelCase ) < MIN_NUM_TOKENS:
return None
__lowerCAmelCase : Any = MinHash(num_perm=_UpperCAmelCase )
for token in set(_UpperCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def _lowercase ( lowercase__ ):
return {t for t in NON_ALPHA.split(_UpperCAmelCase ) if len(t.strip() ) > 0}
class __lowercase :
def __init__( self , *,
A_ = 0.85 , ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = duplication_jaccard_threshold
__lowerCAmelCase : Optional[Any] = NUM_PERM
__lowerCAmelCase : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__lowerCAmelCase : Optional[Any] = defaultdict(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self , A_ , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = self._index.query(__SCREAMING_SNAKE_CASE )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__SCREAMING_SNAKE_CASE )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = []
for base, duplicates in self._duplicate_clusters.items():
__lowerCAmelCase : str = [base] + list(__SCREAMING_SNAKE_CASE )
# reformat the cluster to be a list of dict
__lowerCAmelCase : Optional[Any] = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__SCREAMING_SNAKE_CASE )
return duplicate_clusters
def UpperCamelCase__ ( self , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.get_duplicate_clusters()
with open(__SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _lowercase ( lowercase__ ):
__lowerCAmelCase, __lowerCAmelCase : List[str] = element
__lowerCAmelCase : Union[str, Any] = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _lowercase ( lowercase__ ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_UpperCAmelCase , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Union[str, Any] = DuplicationIndex(duplication_jaccard_threshold=_UpperCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_UpperCAmelCase ) ) , max_queue_size=1_0_0 ) ):
di.add(_UpperCAmelCase , _UpperCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Dict = get_tokens(_UpperCAmelCase )
__lowerCAmelCase : List[Any] = get_tokens(_UpperCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a = None
def _lowercase ( lowercase__ , lowercase__ ):
__lowerCAmelCase : Tuple = []
for elementa in cluster:
__lowerCAmelCase : Optional[int] = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
__lowerCAmelCase : str = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_UpperCAmelCase , _UpperCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__lowerCAmelCase : str = 1
extremes.append(_UpperCAmelCase )
return extremes
def _lowercase ( lowercase__ , lowercase__ , lowercase__ ):
global _shared_dataset
__lowerCAmelCase : Tuple = dataset
__lowerCAmelCase : Optional[int] = []
__lowerCAmelCase : Union[str, Any] = partial(_find_cluster_extremes_shared , jaccard_threshold=_UpperCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_UpperCAmelCase , _UpperCAmelCase , ) , total=len(_UpperCAmelCase ) , ):
extremes_list.append(_UpperCAmelCase )
return extremes_list
def _lowercase ( lowercase__ , lowercase__ = 0.8_5 ):
__lowerCAmelCase : Optional[Any] = make_duplicate_clusters(_UpperCAmelCase , _UpperCAmelCase )
__lowerCAmelCase : str = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
__lowerCAmelCase : Optional[int] = {}
__lowerCAmelCase : Union[str, Any] = find_extremes(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
__lowerCAmelCase : Dict = element
__lowerCAmelCase : int = duplicate_indices - set(extreme_dict.keys() )
__lowerCAmelCase : str = dataset.filter(lambda lowercase__ , lowercase__ : idx not in remove_indices , with_indices=_UpperCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__lowerCAmelCase : Dict = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
__lowerCAmelCase : Optional[int] = extreme_dict[element['''base_index''']]['''copies''']
print(f"""Original dataset size: {len(_UpperCAmelCase )}""" )
print(f"""Number of duplicate clusters: {len(_UpperCAmelCase )}""" )
print(f"""Files in duplicate cluster: {len(_UpperCAmelCase )}""" )
print(f"""Unique files in duplicate cluster: {len(_UpperCAmelCase )}""" )
print(f"""Filtered dataset size: {len(_UpperCAmelCase )}""" )
return ds_filter, duplicate_clusters
| 716 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__lowerCAmelCase : Any = [1_4_4, 1_9_2, 2_4_0]
__lowerCAmelCase : Dict = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0]
elif "mobilevit_xs" in mobilevit_name:
__lowerCAmelCase : int = [9_6, 1_2_0, 1_4_4]
__lowerCAmelCase : str = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4]
elif "mobilevit_xxs" in mobilevit_name:
__lowerCAmelCase : List[str] = [6_4, 8_0, 9_6]
__lowerCAmelCase : List[Any] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0]
__lowerCAmelCase : Tuple = 0.0_5
__lowerCAmelCase : List[str] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
__lowerCAmelCase : int = 5_1_2
__lowerCAmelCase : Dict = 1_6
__lowerCAmelCase : Union[str, Any] = 2_1
__lowerCAmelCase : List[Any] = '''pascal-voc-id2label.json'''
else:
__lowerCAmelCase : List[str] = 1_0_0_0
__lowerCAmelCase : Any = '''imagenet-1k-id2label.json'''
__lowerCAmelCase : Union[str, Any] = '''huggingface/label-files'''
__lowerCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='''dataset''' ) , '''r''' ) )
__lowerCAmelCase : Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
__lowerCAmelCase : Optional[int] = idalabel
__lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
return config
def _lowercase ( lowercase__ , lowercase__=False ):
for i in range(1 , 6 ):
if f"""layer_{i}.""" in name:
__lowerCAmelCase : Dict = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
__lowerCAmelCase : int = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
__lowerCAmelCase : Dict = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
__lowerCAmelCase : Optional[Any] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
__lowerCAmelCase : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
__lowerCAmelCase : int = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
__lowerCAmelCase : Optional[Any] = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
__lowerCAmelCase : List[str] = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
__lowerCAmelCase : List[Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
__lowerCAmelCase : List[Any] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
__lowerCAmelCase : Union[str, Any] = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
__lowerCAmelCase : Any = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
if "expand_1x1" in name:
__lowerCAmelCase : List[Any] = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
__lowerCAmelCase : Optional[int] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
__lowerCAmelCase : Any = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if f""".global_rep.{i}.weight""" in name:
__lowerCAmelCase : int = name.replace(f""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if f""".global_rep.{i}.bias""" in name:
__lowerCAmelCase : Dict = name.replace(f""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
__lowerCAmelCase : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
__lowerCAmelCase : str = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
__lowerCAmelCase : Any = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
__lowerCAmelCase : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
__lowerCAmelCase : Dict = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
__lowerCAmelCase : Union[str, Any] = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
__lowerCAmelCase : List[str] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
__lowerCAmelCase : Dict = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
__lowerCAmelCase : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
__lowerCAmelCase : Optional[Any] = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
__lowerCAmelCase : Tuple = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
__lowerCAmelCase : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
__lowerCAmelCase : Optional[Any] = '''mobilevit.''' + name
return name
def _lowercase ( lowercase__ , lowercase__ , lowercase__=False ):
if base_model:
__lowerCAmelCase : List[Any] = ''''''
else:
__lowerCAmelCase : List[str] = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
__lowerCAmelCase : Optional[int] = orig_state_dict.pop(lowercase__ )
if key[:8] == "encoder.":
__lowerCAmelCase : Dict = key[8:]
if "qkv" in key:
__lowerCAmelCase : List[str] = key.split('''.''' )
__lowerCAmelCase : Any = int(key_split[0][6:] ) - 1
__lowerCAmelCase : List[Any] = int(key_split[3] )
__lowerCAmelCase : List[Any] = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
__lowerCAmelCase : Union[str, Any] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__lowerCAmelCase : Union[str, Any] = (
f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
__lowerCAmelCase : Dict = val[:dim, :]
__lowerCAmelCase : List[Any] = val[dim : dim * 2, :]
__lowerCAmelCase : Optional[int] = val[-dim:, :]
else:
__lowerCAmelCase : Optional[int] = val[:dim]
__lowerCAmelCase : List[str] = val[dim : dim * 2]
__lowerCAmelCase : List[Any] = val[-dim:]
else:
__lowerCAmelCase : int = val
return orig_state_dict
def _lowercase ( ):
__lowerCAmelCase : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowerCAmelCase : Dict = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ):
__lowerCAmelCase : Optional[Any] = get_mobilevit_config(lowercase__ )
# load original state_dict
__lowerCAmelCase : Any = torch.load(lowercase__ , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
__lowerCAmelCase : str = MobileViTForSemanticSegmentation(lowercase__ ).eval()
else:
__lowerCAmelCase : Optional[int] = MobileViTForImageClassification(lowercase__ ).eval()
__lowerCAmelCase : int = convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowerCAmelCase : List[str] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
__lowerCAmelCase : List[str] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__lowerCAmelCase : List[Any] = model(**lowercase__ )
__lowerCAmelCase : Dict = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 2_1, 3_2, 3_2)
if mobilevit_name == "deeplabv3_mobilevit_s":
__lowerCAmelCase : int = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__lowerCAmelCase : Dict = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__lowerCAmelCase : Tuple = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1E-4 )
else:
assert logits.shape == (1, 1_0_0_0)
if mobilevit_name == "mobilevit_s":
__lowerCAmelCase : Optional[Any] = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
elif mobilevit_name == "mobilevit_xs":
__lowerCAmelCase : str = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
elif mobilevit_name == "mobilevit_xxs":
__lowerCAmelCase : Tuple = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase__ )
if push_to_hub:
__lowerCAmelCase : Union[str, Any] = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
__lowerCAmelCase : str = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase__ , organization='''apple''' )
model.push_to_hub(lowercase__ , organization='''apple''' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 583 | 0 |
'''simple docstring'''
a : List[str] = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
a : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 640 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a ( _lowerCamelCase ):
def __init__( self : Optional[int] , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ):
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
snake_case_ = field
snake_case_ = path_or_paths if isinstance(lowercase_ , lowercase_ ) else {self.split: path_or_paths}
snake_case_ = Json(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , field=lowercase_ , **lowercase_ , )
def A_ ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
snake_case_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
snake_case_ = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory )
return dataset
class a :
def __init__( self : Dict , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
snake_case_ = dataset
snake_case_ = path_or_buf
snake_case_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case_ = num_proc
snake_case_ = '''utf-8'''
snake_case_ = to_json_kwargs
def A_ ( self : str ):
snake_case_ = self.to_json_kwargs.pop('''path_or_buf''' , lowercase_ )
snake_case_ = self.to_json_kwargs.pop('''orient''' , '''records''' )
snake_case_ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
snake_case_ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
snake_case_ = self.to_json_kwargs.pop('''compression''' , lowercase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"`datasets` currently does not support {compression} compression" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=lowercase_ ) as buffer:
snake_case_ = self._write(file_obj=lowercase_ , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"The compression parameter is not supported when writing to a buffer, but compression={compression}"
''' was passed. Please provide a local path instead.''' )
snake_case_ = self._write(
file_obj=self.path_or_buf , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **self.to_json_kwargs )
return written
def A_ ( self : List[str] , lowercase_ : Dict ):
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = args
snake_case_ = query_table(
table=self.dataset.data , key=slice(lowercase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
snake_case_ = batch.to_pandas().to_json(
path_or_buf=lowercase_ , orient=lowercase_ , lines=lowercase_ , index=lowercase_ , **lowercase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def A_ ( self : Optional[int] , lowercase_ : BinaryIO , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : int , **lowercase_ : Optional[int] , ):
snake_case_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
snake_case_ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase_ )
else:
snake_case_ ,snake_case_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase_ , lowercase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(lowercase_ )
return written
| 640 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCamelCase )
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
A = field(default='''language-modeling''' ,metadata={'''include_in_asdict_even_if_is_default''': True} )
A = Features({'''text''': Value('''string''' )} )
A = Features({} )
A = "text"
@property
def lowerCamelCase_ ( self ):
return {self.text_column: "text"}
| 571 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCamelCase__ ( _A: Optional[Any] ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = image.size
__lowerCamelCase , __lowerCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__lowerCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
__lowerCamelCase = np.array(_A ).astype(np.floataa ) / 255.0
__lowerCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
__lowerCamelCase = torch.from_numpy(_A )
return 2.0 * image - 1.0
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
super().__init__()
self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 1_0_0 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ):
if isinstance(UpperCAmelCase , PIL.Image.Image ):
__lowerCamelCase = 1
elif isinstance(UpperCAmelCase , torch.Tensor ):
__lowerCamelCase = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}''' )
if isinstance(UpperCAmelCase , PIL.Image.Image ):
__lowerCamelCase = preprocess(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__lowerCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
__lowerCamelCase = next(self.unet.parameters() ).dtype
__lowerCamelCase = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
__lowerCamelCase = image.to(device=self.device , dtype=UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
__lowerCamelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase = {}
if accepts_eta:
__lowerCamelCase = eta
for t in self.progress_bar(UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
__lowerCamelCase = torch.cat([latents, image] , dim=1 )
__lowerCamelCase = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
__lowerCamelCase = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
__lowerCamelCase = self.vqvae.decode(UpperCAmelCase ).sample
__lowerCamelCase = torch.clamp(UpperCAmelCase , -1.0 , 1.0 )
__lowerCamelCase = image / 2 + 0.5
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
| 571 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=12 , _snake_case : Any=7 , _snake_case : List[str]=True , _snake_case : int=True , _snake_case : int=True , _snake_case : Tuple=99 , _snake_case : List[Any]=32 , _snake_case : Optional[int]=32 , _snake_case : List[str]=2 , _snake_case : List[str]=4 , _snake_case : List[Any]=37 , _snake_case : Union[str, Any]=0.1 , _snake_case : Tuple=0.1 , _snake_case : Dict=5_12 , _snake_case : Union[str, Any]=0.02 , _snake_case : Any=0 , _snake_case : Optional[Any]=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = dropout
A__ = attention_dropout
A__ = max_position_embeddings
A__ = initializer_range
A__ = scope
A__ = bos_token_id
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
A__ = input_mask.numpy()
A__ , A__ = input_mask.shape
A__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
A__ = 1
A__ = 0
A__ = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self : int , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : List[str] ):
"""simple docstring"""
A__ = TFBlipTextModel(config=_snake_case )
A__ = model(_snake_case , attention_mask=_snake_case , training=_snake_case )
A__ = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : str ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
A__ : Optional[int] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = False
def _a ( self : Any ):
"""simple docstring"""
A__ = BlipTextModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( self : int , _snake_case : int=True ):
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
| 9 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = "▁"
__UpperCamelCase : Any = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCamelCase : int = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCamelCase : Dict = {
"facebook/xglm-564M": 2_048,
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :Tuple="<s>" , __magic_name__ :List[Any]="</s>" , __magic_name__ :List[str]="</s>" , __magic_name__ :Any="<s>" , __magic_name__ :Tuple="<unk>" , __magic_name__ :int="<pad>" , __magic_name__ :Optional[Dict[str, Any]] = None , **__magic_name__ :Optional[Any] , ):
'''simple docstring'''
a = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
a = 7
a = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
a = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a = 1
# Mimic fairseq token-to-id alignment for the first 4 token
a = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
a = len(self.sp_model )
a = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__magic_name__ )
a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :List[Any] ):
'''simple docstring'''
a = self.__dict__.copy()
a = None
a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :Any , __magic_name__ :Any ):
'''simple docstring'''
a = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
a = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ ))
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ ))
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self :Any , __magic_name__ :str ):
'''simple docstring'''
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Any ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self :List[str] , __magic_name__ :List[str] ):
'''simple docstring'''
a = """""".join(__magic_name__ ).replace(__magic_name__ , """ """ ).strip()
return out_string
def lowerCamelCase__ ( self :List[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__magic_name__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , """wb""" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
| 468 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = old_name
if "patch_embed" in old_name:
UpperCamelCase , UpperCamelCase , UpperCamelCase = old_name.split("." )
if layer == "0":
UpperCamelCase = old_name.replace("0" , "convolution1" )
elif layer == "1":
UpperCamelCase = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
UpperCamelCase = old_name.replace("3" , "convolution2" )
else:
UpperCamelCase = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = r"\b\d{2}\b"
if bool(re.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCamelCase = re.search(r"\d\.\d\d." , _SCREAMING_SNAKE_CASE ).group()
else:
UpperCamelCase = re.search(r"\d\.\d." , _SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
UpperCamelCase = old_name.replace(_SCREAMING_SNAKE_CASE , "" )
UpperCamelCase = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
UpperCamelCase = "intermediate_stages." + trimmed_name
else:
UpperCamelCase = old_name.replace(_SCREAMING_SNAKE_CASE , "" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCamelCase = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
UpperCamelCase = str(int(match[2] ) - num_meta4D_last_stage )
UpperCamelCase = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
UpperCamelCase = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
UpperCamelCase = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
UpperCamelCase = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
UpperCamelCase = trimmed_name.replace("fc2" , "linear_out" )
UpperCamelCase = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
UpperCamelCase = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCamelCase = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCamelCase = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
UpperCamelCase = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
UpperCamelCase = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
UpperCamelCase = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
UpperCamelCase = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCamelCase = new_name.replace("norm" , "layernorm" )
UpperCamelCase = "efficientformer." + new_name
else:
UpperCamelCase = "efficientformer.encoder." + new_name
return new_name
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for key in checkpoint.copy().keys():
UpperCamelCase = checkpoint.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = val
return checkpoint
def a__ ( ):
"""simple docstring"""
UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
UpperCamelCase = EfficientFormerConfig.from_json_file(_SCREAMING_SNAKE_CASE )
UpperCamelCase = EfficientFormerForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE )
UpperCamelCase = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
UpperCamelCase = config.depths[-1] - config.num_metaad_blocks + 1
UpperCamelCase = convert_torch_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = 256
UpperCamelCase = 224
UpperCamelCase = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
UpperCamelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
# original processing pipeline
UpperCamelCase = Compose(
[
Resize(_SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(_SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
] )
UpperCamelCase = image_transforms(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase = outputs.logits
UpperCamelCase = (1, 1_000)
if "l1" in model_name:
UpperCamelCase = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCamelCase = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCamelCase = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
# Save Checkpoints
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Processor successfuly saved at {pytorch_dump_path}" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"Bearnardd/{pytorch_dump_path}" , commit_message="Add model" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F"Bearnardd/{pytorch_dump_path}" , commit_message="Add image processor" , use_temp_dir=_SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCAmelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 713 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _lowerCamelCase :
pass
| 544 | 0 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None ):
if version.parse(hfh.__version__ ).release < version.parse("""0.11.0""" ).release:
# old versions of hfh don't url-encode the file path
A_ : List[Any] = quote(snake_case__ )
return hfh.hf_hub_url(snake_case__ , snake_case__ , repo_type="""dataset""" , revision=snake_case__ )
| 180 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[Any] = """informer"""
_A : Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__(self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "student_t" , lowerCAmelCase_ = "nll" , lowerCAmelCase_ = 1 , lowerCAmelCase_ = None , lowerCAmelCase_ = "mean" , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 32 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = True , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.05 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 100 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_=True , lowerCAmelCase_ = "prob" , lowerCAmelCase_ = 5 , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
# time series specific configuration
A_ : Optional[Any] = prediction_length
A_ : Dict = context_length or prediction_length
A_ : Dict = distribution_output
A_ : Tuple = loss
A_ : Dict = input_size
A_ : Union[str, Any] = num_time_features
A_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A_ : Optional[int] = scaling
A_ : Optional[Any] = num_dynamic_real_features
A_ : Tuple = num_static_real_features
A_ : Tuple = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A_ : List[str] = cardinality
else:
A_ : List[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A_ : int = embedding_dimension
else:
A_ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
A_ : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : Dict = d_model
A_ : Dict = encoder_attention_heads
A_ : Dict = decoder_attention_heads
A_ : Any = encoder_ffn_dim
A_ : Tuple = decoder_ffn_dim
A_ : Tuple = encoder_layers
A_ : Optional[int] = decoder_layers
A_ : List[str] = dropout
A_ : List[str] = attention_dropout
A_ : Any = activation_dropout
A_ : Any = encoder_layerdrop
A_ : List[Any] = decoder_layerdrop
A_ : str = activation_function
A_ : Optional[Any] = init_std
A_ : Optional[int] = use_cache
# Informer
A_ : Dict = attention_type
A_ : List[Any] = sampling_factor
A_ : List[Any] = distil
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowerCamelCase(self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 180 | 1 |
'''simple docstring'''
def A ( lowercase__ : int ) -> list[int]:
if length <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(lowercase__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10)) | 712 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 383 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: Optional[int] = logging.get_logger(__name__)
A: Tuple = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : int = 'xglm'
__lowerCAmelCase : int = ['past_key_values']
__lowerCAmelCase : List[Any] = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
def __init__( self , _SCREAMING_SNAKE_CASE=256008 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=4096 , _SCREAMING_SNAKE_CASE=24 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : Any = d_model
UpperCAmelCase : int = ffn_dim
UpperCAmelCase : Optional[int] = num_layers
UpperCAmelCase : Optional[int] = attention_heads
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : List[str] = dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : Union[str, Any] = activation_dropout
UpperCAmelCase : List[str] = layerdrop
UpperCAmelCase : List[Any] = init_std
UpperCAmelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : List[Any] = use_cache
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
| 160 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
A: Union[str, Any] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
A: Tuple = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
A: Tuple = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
A: Any = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.5 ) -> Optional[Any]:
'''simple docstring'''
if NLTK_VERSION >= version.Version("""3.6.5""" ):
UpperCAmelCase : int = [
meteor_score.single_meteor_score(
word_tokenize(_SCREAMING_SNAKE_CASE ) , word_tokenize(_SCREAMING_SNAKE_CASE ) , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE )
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
else:
UpperCAmelCase : List[str] = [
meteor_score.single_meteor_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , alpha=_SCREAMING_SNAKE_CASE , beta=_SCREAMING_SNAKE_CASE , gamma=_SCREAMING_SNAKE_CASE )
for ref, pred in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
return {"meteor": np.mean(_SCREAMING_SNAKE_CASE )}
| 160 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_UpperCAmelCase : Dict = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_UpperCAmelCase : Any = typing.Union[np.floataa, int, float] # noqa: UP007
def UpperCamelCase ( lowercase_ : Vector , lowercase_ : Vector ) -> VectorOut:
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )
def UpperCamelCase ( lowercase_ : Vector , lowercase_ : Vector ) -> VectorOut:
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def UpperCamelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_0_0_0_0 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 145 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def UpperCamelCase ( lowercase_ : np.ndarray , lowercase_ : Optional[str] , lowercase_ : Optional[str] = None ) -> Tuple:
'''simple docstring'''
lowercase =tesseract_config if tesseract_config is not None else ''''''
# apply OCR
lowercase =to_pil_image(lowercase_ )
lowercase , lowercase =pil_image.size
lowercase =pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
lowercase , lowercase , lowercase , lowercase , lowercase =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase =[idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
lowercase =[word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase =[]
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
lowercase =[x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
lowercase =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = "" , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase =get_size_dict(snake_case_ )
lowercase =do_resize
lowercase =size
lowercase =resample
lowercase =apply_ocr
lowercase =ocr_lang
lowercase =tesseract_config
def _A( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase =(size['''height'''], size['''width'''])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
lowercase =do_resize if do_resize is not None else self.do_resize
lowercase =size if size is not None else self.size
lowercase =get_size_dict(snake_case_ )
lowercase =resample if resample is not None else self.resample
lowercase =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
lowercase =[to_numpy_array(snake_case_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase =[]
lowercase =[]
for image in images:
lowercase , lowercase =apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
words_batch.append(snake_case_ )
boxes_batch.append(snake_case_ )
if do_resize:
lowercase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase =[flip_channel_order(snake_case_ ) for image in images]
lowercase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
lowercase =BatchFeature(data={'''pixel_values''': images} , tensor_type=snake_case_ )
if apply_ocr:
lowercase =words_batch
lowercase =boxes_batch
return data
| 145 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
__lowerCamelCase = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
__lowerCamelCase = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
__lowerCamelCase = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
__lowerCamelCase = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def lowerCamelCase__ ( self : Tuple , __snake_case : Any ) -> List[str]:
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def lowerCamelCase__ ( self : Any , __snake_case : Any , __snake_case : Any , __snake_case : Tuple=0.9 , __snake_case : List[Any]=3 , __snake_case : Tuple=0.5 ) -> Optional[Any]:
if NLTK_VERSION >= version.Version("""3.6.5""" ):
__magic_name__: List[Any] = [
meteor_score.single_meteor_score(
word_tokenize(__snake_case ) , word_tokenize(__snake_case ) , alpha=__snake_case , beta=__snake_case , gamma=__snake_case )
for ref, pred in zip(__snake_case , __snake_case )
]
else:
__magic_name__: str = [
meteor_score.single_meteor_score(__snake_case , __snake_case , alpha=__snake_case , beta=__snake_case , gamma=__snake_case )
for ref, pred in zip(__snake_case , __snake_case )
]
return {"meteor": np.mean(__snake_case )}
| 96 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
__magic_name__: Tuple = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Tuple = self._get_dummy_logits()[0]
__magic_name__: List[Any] = processor.decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
__magic_name__: Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Optional[int] = self._get_dummy_logits()
__magic_name__: Any = processor.batch_decode(__snake_case , output_word_offsets=__snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__snake_case , __snake_case ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
import torch
__magic_name__: List[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__snake_case )
__magic_name__: Dict = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__magic_name__: Any = iter(__snake_case )
__magic_name__: Optional[int] = next(__snake_case )
__magic_name__: Optional[int] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__magic_name__: Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__magic_name__: List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__magic_name__: List[Any] = model(__snake_case ).logits.cpu().numpy()
__magic_name__: Optional[Any] = processor.decode(logits[0] , output_word_offsets=__snake_case )
__magic_name__: List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__magic_name__: str = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__magic_name__: Tuple = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , __snake_case )
self.assertEqual(""" """.join(self.get_from_offsets(__snake_case , """word""" ) ) , output.text )
# output times
__magic_name__: Dict = torch.tensor(self.get_from_offsets(__snake_case , """start_time""" ) )
__magic_name__: Optional[Any] = torch.tensor(self.get_from_offsets(__snake_case , """end_time""" ) )
# fmt: off
__magic_name__: Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__magic_name__: int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=0.01 ) )
| 96 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Tuple:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__=0 ) -> int:
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda snake_case__ : x[column] )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=float('''inf''' ) ) -> Tuple:
for i in range(points_counts - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase =euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase =current_dis
return min_dis
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=float('''inf''' ) ) -> Tuple:
for i in range(min(6 , points_counts - 1 ) , SCREAMING_SNAKE_CASE__ ):
for j in range(max(0 , i - 6 ) , SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase =euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__UpperCAmelCase =current_dis
return min_dis
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if points_counts <= 3:
return dis_between_closest_pair(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# recursion
__UpperCAmelCase =points_counts // 2
__UpperCAmelCase =closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , points_sorted_on_y[:mid] , SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase =closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , points_sorted_on_y[mid:] , points_counts - mid )
__UpperCAmelCase =min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase =[]
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(SCREAMING_SNAKE_CASE__ )
__UpperCAmelCase =dis_between_closest_in_strip(
SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
return min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ ) -> Any:
__UpperCAmelCase =column_based_sort(SCREAMING_SNAKE_CASE__ , column=0 )
__UpperCAmelCase =column_based_sort(SCREAMING_SNAKE_CASE__ , column=1 )
return (
closest_pair_of_points_sqr(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
) ** 0.5
if __name__ == "__main__":
UpperCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 704 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
UpperCamelCase_ = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
UpperCamelCase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> str:
if "://" in dataset_path:
__UpperCAmelCase =dataset_path.split('''://''' )[1]
return dataset_path
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ ) -> int:
__UpperCAmelCase =not is_remote_filesystem(snake_case__ )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(snake_case__ ) , fs._strip_protocol(snake_case__ ) )
else:
fs.mv(snake_case__ , snake_case__ , recursive=snake_case__ )
def SCREAMING_SNAKE_CASE ( ) -> None:
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__UpperCAmelCase =None
__UpperCAmelCase =None
__UpperCAmelCase =threading.Lock()
| 142 | 0 |
import os
import numpy
import onnx
def A ( __UpperCamelCase , __UpperCamelCase ) -> int:
A__ = a.name
A__ = b.name
A__ = ''
A__ = ''
A__ = a == b
A__ = name_a
A__ = name_b
return res
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__UpperCamelCase , __UpperCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , __UpperCamelCase , __UpperCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __UpperCamelCase , __UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
for n in graph_proto.node:
_node_replace_input_with(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
A__ = list(model.graph.initializer )
A__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
A__ = inits[i].name
A__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __UpperCamelCase , __UpperCamelCase )
def A ( __UpperCamelCase ) -> Tuple:
A__ = os.path.dirname(__UpperCamelCase )
A__ = os.path.basename(__UpperCamelCase )
A__ = onnx.load(os.path.join(__UpperCamelCase , __UpperCamelCase ) )
A__ = list(model.graph.initializer )
A__ = set()
A__ = {}
A__ = []
A__ = 0
for i in range(len(__UpperCamelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__UpperCamelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__UpperCamelCase )
dup_set.add(__UpperCamelCase )
A__ = inits[j].data_type
A__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , __UpperCamelCase )
total_reduced_size += mem_size
A__ = inits[i].name
A__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__UpperCamelCase )
else:
A__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_024 / 1_024 / 1_024 , 'GB' )
A__ = sorted(__UpperCamelCase )
_remove_dup_initializers_from_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 'optimized_' + model_file_name
A__ = os.path.join(__UpperCamelCase , __UpperCamelCase )
onnx.save(__UpperCamelCase , __UpperCamelCase )
return new_model
| 9 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 9 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
__UpperCamelCase : int = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
A: Optional[int] = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."})
A: bool = field(
default=__lowerCAmelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
A: Optional[int] = field(
default=__lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
A: Optional[int] = field(
default=__lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
A: Optional[int] = field(
default=__lowerCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class __magic_name__ :
A: str = field(
default=__lowerCAmelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
A: str = field(
default=__lowerCAmelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."})
A: Optional[str] = field(
default=__lowerCAmelCase , metadata={"help": "Train language if it is different from the evaluation language."})
A: Optional[str] = field(
default=__lowerCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
A: Optional[str] = field(
default=__lowerCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
A: Optional[str] = field(
default=__lowerCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
A: Optional[bool] = field(
default=__lowerCAmelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
A: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
A: bool = field(
default=__lowerCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
A: bool = field(
default=__lowerCAmelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ : int = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCamelCase__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCamelCase__ : Union[str, Any] = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
UpperCamelCase__ : Union[str, Any] = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : Optional[int] = train_dataset.features['''label'''].names
if training_args.do_eval:
UpperCamelCase__ : Tuple = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : str = eval_dataset.features['''label'''].names
if training_args.do_predict:
UpperCamelCase__ : int = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : Tuple = predict_dataset.features['''label'''].names
# Labels
UpperCamelCase__ : str = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel={str(SCREAMING_SNAKE_CASE ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCamelCase__ : Optional[int] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCamelCase__ : Optional[Any] = False
def preprocess_function(SCREAMING_SNAKE_CASE : Optional[int] ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ : Union[str, Any] = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
UpperCamelCase__ : Optional[Any] = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCamelCase__ : Tuple = train_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ : Optional[int] = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
UpperCamelCase__ : int = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCamelCase__ : List[Any] = eval_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCamelCase__ : Optional[int] = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
UpperCamelCase__ : Tuple = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
UpperCamelCase__ : Tuple = predict_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
UpperCamelCase__ : Any = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE : EvalPrediction ):
UpperCamelCase__ : List[str] = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE ) else p.predictions
UpperCamelCase__ : Union[str, Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCamelCase__ : Optional[Any] = default_data_collator
elif training_args.fpaa:
UpperCamelCase__ : Optional[Any] = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
UpperCamelCase__ : Tuple = None
# Initialize our Trainer
UpperCamelCase__ : Optional[int] = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
UpperCamelCase__ : str = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ : List[str] = last_checkpoint
UpperCamelCase__ : Tuple = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = train_result.metrics
UpperCamelCase__ : int = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCamelCase__ : Optional[int] = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''train''' , SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase__ : List[str] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = trainer.predict(SCREAMING_SNAKE_CASE , metric_key_prefix='''predict''' )
UpperCamelCase__ : Union[str, Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
UpperCamelCase__ : List[Any] = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('''predict''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''predict''' , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
UpperCamelCase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Union[str, Any] = label_list[item]
writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
main()
| 106 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _a ( SCREAMING_SNAKE_CASE : Namespace ):
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__UpperCamelCase : List[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class __magic_name__ ( __lowerCAmelCase):
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCamelCase__ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Optional[int] , ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
UpperCamelCase__ : List[str] = model_type
UpperCamelCase__ : Optional[int] = tf_checkpoint
UpperCamelCase__ : List[Any] = pytorch_dump_output
UpperCamelCase__ : List[Any] = config
UpperCamelCase__ : Any = finetuning_task_name
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase__ : str = self._tf_checkpoint
UpperCamelCase__ : List[Any] = ''''''
else:
UpperCamelCase__ : Any = self._tf_checkpoint
UpperCamelCase__ : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCamelCase__ , self._config , self._pytorch_dump_output , lowerCamelCase__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCamelCase__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 106 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> Any:
snake_case__ : List[str] = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : Optional[Any] = """"""
else:
snake_case__ : Dict = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case__ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : str = in_proj_bias[: config.hidden_size]
snake_case__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : int = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : Tuple = in_proj_bias[-config.hidden_size :]
def __snake_case( _lowerCAmelCase ) -> Any:
snake_case__ : Any = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
snake_case__ : str = dct.pop(_lowerCAmelCase )
snake_case__ : Tuple = val
def __snake_case( ) -> List[str]:
snake_case__ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Optional[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> List[str]:
snake_case__ : Tuple = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=_lowerCAmelCase , )
snake_case__ : str = ViTHybridConfig(backbone_config=_lowerCAmelCase , image_size=384 , num_labels=1_000 )
snake_case__ : str = False
# load original model from timm
snake_case__ : Tuple = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : Dict = timm_model.state_dict()
if base_model:
remove_classification_head_(_lowerCAmelCase )
snake_case__ : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
snake_case__ : List[str] = """huggingface/label-files"""
snake_case__ : int = """imagenet-1k-id2label.json"""
snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : Union[str, Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : Optional[int] = idalabel
snake_case__ : str = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : Any = ViTHybridModel(_lowerCAmelCase ).eval()
else:
snake_case__ : int = ViTHybridForImageClassification(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# create image processor
snake_case__ : Any = create_transform(**resolve_data_config({} , model=_lowerCAmelCase ) )
snake_case__ : str = transform.transforms
snake_case__ : Dict = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case__ : List[str] = ViTHybridImageProcessor(
do_resize=_lowerCAmelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCAmelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=_lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Any = prepare_img()
snake_case__ : Optional[Any] = transform(_lowerCAmelCase ).unsqueeze(0 )
snake_case__ : Tuple = processor(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
# verify logits
with torch.no_grad():
snake_case__ : List[Any] = model(_lowerCAmelCase )
snake_case__ : Optional[Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Tuple = timm_model.forward_features(_lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ : List[Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
__a = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 374 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374 | 1 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowerCAmelCase (__A):
"""simple docstring"""
_a = test_file.split(os.path.sep)
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''')
_a = components[-1]
if not test_fn.endswith('''py'''):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''')
if not test_fn.startswith('''test_modeling_'''):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''')
_a = components[:-1] + [test_fn.replace('''.py''' , '''''')]
_a = '''.'''.join(__A)
return test_module_path
def lowerCAmelCase (__A):
"""simple docstring"""
_a = get_module_path(__A)
_a = importlib.import_module(__A)
return test_module
def lowerCAmelCase (__A):
"""simple docstring"""
_a = []
_a = get_test_module(__A)
for attr in dir(__A):
if attr.endswith('''ModelTester'''):
tester_classes.append(getattr(__A , __A))
# sort with class names
return sorted(__A , key=lambda __A: x.__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = []
_a = get_test_module(__A)
for attr in dir(__A):
_a = getattr(__A , __A)
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_a = getattr(__A , '''all_model_classes''' , [])
if len(__A) > 0:
test_classes.append(__A)
# sort with class names
return sorted(__A , key=lambda __A: x.__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = get_test_classes(__A)
_a = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes)
# sort with class names
return sorted(__A , key=lambda __A: x.__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = test_class()
if hasattr(__A , '''setUp'''):
test.setUp()
_a = None
if hasattr(__A , '''model_tester'''):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_a = test.model_tester.__class__
return model_tester
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_test_classes(__A)
_a = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__A)
# sort with class names
return sorted(__A , key=lambda __A: x.__name__)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_test_classes_for_model(__A , __A)
_a = []
for test_class in test_classes:
_a = get_model_tester_from_test_class(__A)
if tester_class is not None:
tester_classes.append(__A)
# sort with class names
return sorted(__A , key=lambda __A: x.__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = get_test_classes(__A)
_a = {test_class: get_model_tester_from_test_class(__A) for test_class in test_classes}
return test_tester_mapping
def lowerCAmelCase (__A):
"""simple docstring"""
_a = get_model_classes(__A)
_a = {
model_class: get_test_classes_for_model(__A , __A) for model_class in model_classes
}
return model_test_mapping
def lowerCAmelCase (__A):
"""simple docstring"""
_a = get_model_classes(__A)
_a = {
model_class: get_tester_classes_for_model(__A , __A) for model_class in model_classes
}
return model_to_tester_mapping
def lowerCAmelCase (__A):
"""simple docstring"""
if isinstance(__A , __A):
return o
elif isinstance(__A , __A):
return o.__name__
elif isinstance(__A , (list, tuple)):
return [to_json(__A) for x in o]
elif isinstance(__A , __A):
return {to_json(__A): to_json(__A) for k, v in o.items()}
else:
return o
| 352 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase_ = get_tests_dir("fixtures")
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = mock.Mock()
_a = 500
_a = {}
_a = HTTPError
_a = {}
# Download this model to make sure it's in the cache.
_a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head:
_a = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ) -> str:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def a__ (self ) -> Dict:
"""simple docstring"""
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
_a = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
_a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(A )
@is_staging_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ) -> Any:
"""simple docstring"""
_a = TOKEN
HfFolder.save_token(A )
@classmethod
def a__ (cls ) -> Union[str, Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def a__ (self ) -> str:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id='''test-image-processor''' , push_to_hub=A , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=A , use_auth_token=self._token )
_a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
_a = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
_a = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 352 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''table-transformer'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Optional[Any] , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Any=100 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : List[str]=2_048 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Optional[int]=2_048 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Tuple=256 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : List[Any]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]="sine" , UpperCAmelCase__ : Tuple="resnet50" , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[Any]=0.1 , **UpperCAmelCase__ : Any , ) ->List[str]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = backbone_config.get('''model_type''')
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(UpperCAmelCase__)
# set timm attributes to None
A__ , A__ , A__ = None, None, None
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->int:
'''simple docstring'''
return self.d_model
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : int) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def SCREAMING_SNAKE_CASE ( self : List[str]) ->float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int:
'''simple docstring'''
return 12
| 87 | """simple docstring"""
import math
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0 ) -> int:
lowercase__: Dict = sum(i * i for i in range(1 , n + 1 ) )
lowercase__: int = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 586 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''roformer'''
def __init__( self : Dict , __UpperCamelCase : Any=5_0000 , __UpperCamelCase : int=None , __UpperCamelCase : Optional[int]=768 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : str=12 , __UpperCamelCase : str=3072 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[int]=1536 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Tuple=1E-12 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : str=False , __UpperCamelCase : Optional[int]=True , **__UpperCamelCase : List[Any] , ) -> str:
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size if embedding_size is None else embedding_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = rotary_value
_UpperCamelCase = use_cache
class UpperCAmelCase_ ( _lowercase):
@property
def _UpperCamelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 342 | """simple docstring"""
from functools import lru_cache
@lru_cache
def lowercase ( a__ : int ) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if openai_config_file == "":
lowerCamelCase_ : str = OpenAIGPTConfig()
else:
lowerCamelCase_ : List[str] = OpenAIGPTConfig.from_json_file(_lowercase )
lowerCamelCase_ : Any = OpenAIGPTModel(_lowercase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
lowerCamelCase_ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCamelCase_ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , _lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__lowercase : List[Any] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 422 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowercase : str = logging.get_logger(__name__)
__lowercase : str = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Any = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowercase : List[str] = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__lowercase : Optional[int] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Any = RoFormerTokenizer
def __init__(self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ):
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
lowerCamelCase_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , A ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , A ) != strip_accents
):
lowerCamelCase_ : Any = getattr(A , pre_tok_state.pop('''type''' ) )
lowerCamelCase_ : Dict = do_lower_case
lowerCamelCase_ : List[Any] = strip_accents
lowerCamelCase_ : Any = pre_tok_class(**A )
lowerCamelCase_ : str = do_lower_case
def __getstate__(self ):
lowerCamelCase_ : Optional[Any] = self.__dict__.copy()
lowerCamelCase_ : List[Any] = BertPreTokenizer()
return state
def __setstate__(self , A ):
lowerCamelCase_ : str = d
lowerCamelCase_ : List[str] = self.__dict__['''_tokenizer'''].get_vocab()
lowerCamelCase_ : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(A ) )
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Optional[int] = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def UpperCAmelCase__ (self , A , A=None , A=None , A=False , **A , ):
lowerCamelCase_ : str = BertPreTokenizer()
return super().save_pretrained(A , A , A , A , **A )
| 422 | 1 |
'''simple docstring'''
import random
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
UpperCamelCase = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
UpperCamelCase = items[random.randint(0 , len(_lowercase ) - 1 )]
UpperCamelCase = 0
UpperCamelCase = _partition(_lowercase , _lowercase )
UpperCamelCase = len(_lowercase )
UpperCamelCase = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
| 712 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( __UpperCamelCase )-> str:
return EnvironmentCommand()
def lowercase__ ( __UpperCamelCase )-> str:
return EnvironmentCommand(args.accelerate_config_file )
class a_ ( lowerCamelCase ):
@staticmethod
def A__ ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = parser.add_parser("""env""" )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
download_parser.add_argument(
"""--accelerate-config_file""" , default=_SCREAMING_SNAKE_CASE , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
def __init__( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
UpperCamelCase = accelerate_config_file
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = """not installed"""
if is_safetensors_available():
import safetensors
UpperCamelCase = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
UpperCamelCase = F"{safetensors.__version__} but is ignored because of PyTorch version too old."
UpperCamelCase = """not installed"""
UpperCamelCase = UpperCamelCase = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
UpperCamelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
UpperCamelCase = (
"""\n""".join([F"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else F"\t{accelerate_config}"
)
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_torch_available():
import torch
UpperCamelCase = torch.__version__
UpperCamelCase = torch.cuda.is_available()
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_tf_available():
import tensorflow as tf
UpperCamelCase = tf.__version__
try:
# deprecated in v2.1
UpperCamelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
UpperCamelCase = bool(tf.config.list_physical_devices("""GPU""" ) )
UpperCamelCase = """not installed"""
UpperCamelCase = """not installed"""
UpperCamelCase = """not installed"""
UpperCamelCase = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
UpperCamelCase = flax.__version__
UpperCamelCase = jax.__version__
UpperCamelCase = jaxlib.__version__
UpperCamelCase = jax.lib.xla_bridge.get_backend().platform
UpperCamelCase = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": F"{safetensors_version}",
"""Accelerate version""": F"{accelerate_version}",
"""Accelerate config""": F"{accelerate_config_str}",
"""PyTorch version (GPU?)""": F"{pt_version} ({pt_cuda_available})",
"""Tensorflow version (GPU?)""": F"{tf_version} ({tf_cuda_available})",
"""Flax version (CPU?/GPU?/TPU?)""": F"{flax_version} ({jax_backend})",
"""Jax version""": F"{jax_version}",
"""JaxLib version""": F"{jaxlib_version}",
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(_SCREAMING_SNAKE_CASE ) )
return info
@staticmethod
def A__ ( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return "\n".join([F"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
| 35 | 0 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : list[tuple[float, float]] ):
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(lowercase__ ) - 1
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree ,lowercase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(lowercase__ ) ,5 ) == 1
return output_values
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : float ):
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(lowercase__ )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : float = 0.0_1 ):
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(lowercase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
lowercase__ ,lowercase__ ,color='''blue''' ,label='''Curve of Degree ''' + str(self.degree ) ,)
plt.scatter(lowercase__ ,lowercase__ ,color='''red''' ,label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 41 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A : Tuple = "pt"
elif is_tf_available():
A : Optional[int] = "tf"
else:
A : Optional[Any] = "jax"
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = ByTaTokenizer
lowerCamelCase__ = False
def __A ( self : Union[str, Any] ) -> List[Any]:
super().setUp()
SCREAMING_SNAKE_CASE_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self : Tuple ) -> Union[str, Any]:
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def __A ( self : List[Any] , **__magic_name__ : Dict ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ )
def __A ( self : List[str] , __magic_name__ : List[Any] , __magic_name__ : Optional[int]=False , __magic_name__ : Optional[int]=20 , __magic_name__ : Any=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(__magic_name__ ) ):
try:
SCREAMING_SNAKE_CASE_ = tokenizer.decode([i] , clean_up_tokenization_spaces=__magic_name__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda __magic_name__ : re.match(r"^[ a-zA-Z]+$" , t[1] ) , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = list(filter(lambda __magic_name__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__magic_name__ ) , __magic_name__ ) )
if max_length is not None and len(__magic_name__ ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(__magic_name__ ) < min_length and len(__magic_name__ ) > 0:
while len(__magic_name__ ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
if " " not in output_txt and len(__magic_name__ ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__magic_name__ )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__magic_name__ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = " " + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
return output_txt, output_ids
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
SCREAMING_SNAKE_CASE_ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
def __A ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = "Unicode €."
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"] , __magic_name__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , "Unicode €.</s>" )
SCREAMING_SNAKE_CASE_ = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"] , __magic_name__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
def __A ( self : Any ) -> int:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __A ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors=__magic_name__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , __magic_name__ )
self.assertIn("attention_mask" , __magic_name__ )
self.assertNotIn("decoder_input_ids" , __magic_name__ )
self.assertNotIn("decoder_attention_mask" , __magic_name__ )
def __A ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
"Summary of the text.",
"Another summary.",
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=__magic_name__ , max_length=32 , padding="max_length" , truncation=__magic_name__ , return_tensors=__magic_name__ )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def __A ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ["A long paragraph for summarization. </s>"]
SCREAMING_SNAKE_CASE_ = ["Summary of the text. </s>"]
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ , text_target=__magic_name__ )
self.assertEqual(__magic_name__ , batch["input_ids"][0] )
self.assertEqual(__magic_name__ , batch["labels"][0] )
def __A ( self : Dict ) -> List[str]:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
shutil.rmtree(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
tokenizer.save_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(__magic_name__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__magic_name__ )
def __A ( self : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
with open(os.path.join(__magic_name__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(__magic_name__ )
with open(os.path.join(__magic_name__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [F'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
"an_additional_special_token"
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(__magic_name__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
with open(os.path.join(__magic_name__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__magic_name__ , __magic_name__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
__magic_name__ , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__magic_name__ )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
__magic_name__ , additional_special_tokens=__magic_name__ , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def __A ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(__magic_name__ )
self.assertTrue(tokenizer.decode([255] ) == "" )
def __A ( self : Optional[int] ) -> List[Any]:
pass
def __A ( self : List[Any] ) -> List[Any]:
pass
def __A ( self : List[str] ) -> List[str]:
pass
def __A ( self : Any ) -> Union[str, Any]:
pass
def __A ( self : List[Any] ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=__magic_name__ , do_lower_case=__magic_name__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def __A ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
__magic_name__ , skip_special_tokens=__magic_name__ )
for attr in attributes_list:
setattr(__magic_name__ , attr + "_id" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + "_id" ) , __magic_name__ )
setattr(__magic_name__ , attr + "_id" , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , __magic_name__ ) , __magic_name__ )
self.assertEqual(getattr(__magic_name__ , attr + "_id" ) , __magic_name__ )
setattr(__magic_name__ , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(__magic_name__ , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(__magic_name__ , "additional_special_tokens_ids" ) , [] )
setattr(__magic_name__ , "additional_special_tokens_ids" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__magic_name__ , "additional_special_tokens" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__magic_name__ , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
| 140 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class __magic_name__( __lowerCAmelCase ):
UpperCAmelCase_ : str = """git_vision_model"""
def __init__( self : str , __UpperCamelCase : Union[str, Any]=7_6_8 , __UpperCamelCase : str=3_0_7_2 , __UpperCamelCase : Tuple=1_2 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : str=3 , __UpperCamelCase : Dict=2_2_4 , __UpperCamelCase : Union[str, Any]=1_6 , __UpperCamelCase : Optional[int]="quick_gelu" , __UpperCamelCase : Optional[int]=1E-5 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : List[str]=0.02 , **__UpperCamelCase : int , ):
'''simple docstring'''
super().__init__(**__UpperCamelCase )
snake_case__ = hidden_size
snake_case__ = intermediate_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = num_channels
snake_case__ = patch_size
snake_case__ = image_size
snake_case__ = initializer_range
snake_case__ = attention_dropout
snake_case__ = layer_norm_eps
snake_case__ = hidden_act
@classmethod
def __lowerCAmelCase( cls : Dict , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : Optional[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(__UpperCamelCase )
snake_case__ , snake_case__ = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
snake_case__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class __magic_name__( __lowerCAmelCase ):
UpperCAmelCase_ : Union[str, Any] = """git"""
def __init__( self : str , __UpperCamelCase : int=None , __UpperCamelCase : List[Any]=3_0_5_2_2 , __UpperCamelCase : Dict=7_6_8 , __UpperCamelCase : int=6 , __UpperCamelCase : Any=1_2 , __UpperCamelCase : Optional[Any]=3_0_7_2 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[str]=1_0_2_4 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : Dict=1E-12 , __UpperCamelCase : Any=0 , __UpperCamelCase : Dict="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=False , __UpperCamelCase : Union[str, Any]=1_0_1 , __UpperCamelCase : str=1_0_2 , __UpperCamelCase : str=None , **__UpperCamelCase : str , ):
'''simple docstring'''
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , pad_token_id=__UpperCamelCase , **__UpperCamelCase )
if vision_config is None:
snake_case__ = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
snake_case__ = GitVisionConfig(**__UpperCamelCase )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = position_embedding_type
snake_case__ = use_cache
snake_case__ = tie_word_embeddings
snake_case__ = num_image_with_embedding
snake_case__ = bos_token_id
snake_case__ = eos_token_id
def __lowerCAmelCase( self : Dict ):
'''simple docstring'''
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.vision_config.to_dict()
snake_case__ = self.__class__.model_type
return output | 566 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a__ = logging.get_logger(__name__)
def snake_case__ ( a , a ) -> Optional[int]:
'''simple docstring'''
snake_case__ = set()
snake_case__ = []
def parse_line(a ):
for line in fp:
if isinstance(a , a ):
snake_case__ = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(a ) > 0:
snake_case__ = """\n""".join(a )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(a )
buffer.clear()
continue
else:
snake_case__ = line.strip()
buffer.append(a )
if from_gh:
for filename in os.listdir(a ):
snake_case__ = os.path.join(a , a )
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with open(a ) as fp:
parse_line(a )
else:
try:
with zipfile.ZipFile(a ) as z:
for filename in z.namelist():
if not os.path.isdir(a ):
# read the file
if filename != "warnings.txt":
continue
with z.open(a ) as fp:
parse_line(a )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def snake_case__ ( a , a ) -> int:
'''simple docstring'''
snake_case__ = set()
snake_case__ = [os.path.join(a , a ) for p in os.listdir(a ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(a , a ) )
return selected_warnings
if __name__ == "__main__":
def snake_case__ ( a ) -> int:
'''simple docstring'''
return values.split(""",""" )
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
a__ = parser.parse_args()
a__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a__ = extract_warnings(args.output_dir, args.targets)
a__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 566 | 1 |
'''simple docstring'''
def A (__lowerCamelCase :Any , __lowerCamelCase :str , __lowerCamelCase :List[Any] , __lowerCamelCase :int , __lowerCamelCase :Tuple , __lowerCamelCase :Any ):
if index == r:
for j in range(__lowerCamelCase ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_lowerCAmelCase = arr[i]
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 , __lowerCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :str , __lowerCamelCase :int ):
# A temporary array to store all combination one by one
_lowerCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 0 , __lowerCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_lowercase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 5 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase__ ( lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = prime_factors(lowercase_ )
if is_square_free(lowercase_ ):
return -1 if len(lowercase_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase__ ( lowercase_ = 1_000_000 ,lowercase_ = 10 ) -> int:
"""simple docstring"""
_UpperCamelCase : defaultdict = defaultdict(lowercase_ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase : Any = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
_UpperCamelCase : str = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowercase_ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 51 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : int = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "pix2struct_text_model"
UpperCAmelCase_ = ["past_key_values"]
UpperCAmelCase_ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int], _UpperCAmelCase : Union[str, Any]=5_0_2_4_4, _UpperCAmelCase : Union[str, Any]=7_6_8, _UpperCAmelCase : Optional[Any]=6_4, _UpperCAmelCase : Any=2_0_4_8, _UpperCAmelCase : int=1_2, _UpperCAmelCase : Optional[int]=1_2, _UpperCAmelCase : Any=3_2, _UpperCAmelCase : int=1_2_8, _UpperCAmelCase : str=0.1, _UpperCAmelCase : List[Any]=1E-6, _UpperCAmelCase : Any=1.0, _UpperCAmelCase : int="gelu_new", _UpperCAmelCase : Optional[int]=0, _UpperCAmelCase : Optional[int]=False, _UpperCAmelCase : List[str]=0, _UpperCAmelCase : Any=1, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : List[str]=True, **_UpperCAmelCase : Optional[Any], ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : str = d_kv
SCREAMING_SNAKE_CASE__ : int = d_ff
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_heads
SCREAMING_SNAKE_CASE__ : Tuple = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : Optional[int] = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : int = decoder_start_token_id
# for backwards compatibility
SCREAMING_SNAKE_CASE__ : Dict = dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, decoder_start_token_id=_UpperCAmelCase, tie_word_embeddings=_UpperCAmelCase, is_decoder=_UpperCAmelCase, **_UpperCAmelCase, )
@classmethod
def A_ ( cls : List[str], _UpperCAmelCase : Union[str, os.PathLike], **_UpperCAmelCase : Dict ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ : List[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase, **_UpperCAmelCase )
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "pix2struct_vision_model"
def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any]=7_6_8, _UpperCAmelCase : Union[str, Any]=7_6_8, _UpperCAmelCase : Any=2_0_4_8, _UpperCAmelCase : Tuple=6_4, _UpperCAmelCase : Dict=1_2, _UpperCAmelCase : Optional[Any]=1_2, _UpperCAmelCase : Union[str, Any]="gelu_new", _UpperCAmelCase : Any=1E-6, _UpperCAmelCase : Optional[Any]=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Any=1E-10, _UpperCAmelCase : Union[str, Any]=1.0, _UpperCAmelCase : int=4_0_9_6, _UpperCAmelCase : List[str]=3_2, _UpperCAmelCase : List[str]=1_2_8, **_UpperCAmelCase : Any, ) -> List[str]:
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Any = patch_embed_hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = d_ff
SCREAMING_SNAKE_CASE__ : Optional[Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : Dict = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = initializer_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = dense_act_fn
SCREAMING_SNAKE_CASE__ : int = seq_len
SCREAMING_SNAKE_CASE__ : List[str] = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : Optional[int] = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Dict = d_kv
@classmethod
def A_ ( cls : int, _UpperCAmelCase : Union[str, os.PathLike], **_UpperCAmelCase : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase, **_UpperCAmelCase )
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "pix2struct"
UpperCAmelCase_ = True
def __init__( self : Optional[Any], _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Dict=1.0, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Dict=False, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : Union[str, Any]=True, **_UpperCAmelCase : str, ) -> Tuple:
"""simple docstring"""
super().__init__(tie_word_embeddings=_UpperCAmelCase, is_encoder_decoder=_UpperCAmelCase, **_UpperCAmelCase )
if text_config is None:
SCREAMING_SNAKE_CASE__ : Dict = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PixaStructTextConfig(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = PixaStructVisionConfig(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_config.decoder_start_token_id
SCREAMING_SNAKE_CASE__ : List[str] = self.text_config.pad_token_id
SCREAMING_SNAKE_CASE__ : Dict = self.text_config.eos_token_id
SCREAMING_SNAKE_CASE__ : Dict = initializer_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = self.initializer_range
SCREAMING_SNAKE_CASE__ : Any = self.initializer_range
SCREAMING_SNAKE_CASE__ : int = is_vqa
@classmethod
def A_ ( cls : Dict, _UpperCAmelCase : PixaStructTextConfig, _UpperCAmelCase : PixaStructVisionConfig, **_UpperCAmelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **_UpperCAmelCase )
def A_ ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : str = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : Dict = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : List[Any] = self.__class__.model_type
return output
| 663 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["image_processor", "tokenizer"]
UpperCAmelCase_ = "AutoImageProcessor"
UpperCAmelCase_ = "AutoTokenizer"
def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.", _UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = self.image_processor
SCREAMING_SNAKE_CASE__ : Any = False
def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = args[0]
SCREAMING_SNAKE_CASE__ : str = args[1:]
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = encodings["input_ids"]
return inputs
def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )
@contextmanager
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your images inputs, or in a separate call." )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any:
"""simple docstring"""
if added_vocab is None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab()
SCREAMING_SNAKE_CASE__ : str = {}
while tokens:
SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE )
if start_token is None:
break
SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 )
SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE )
SCREAMING_SNAKE_CASE__ : Any = start_token.group()
if end_token is None:
SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group()
SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE )
if content is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if value:
if len(_UpperCAmelCase ) == 1:
SCREAMING_SNAKE_CASE__ : str = value[0]
SCREAMING_SNAKE_CASE__ : List[str] = value
else: # leaf nodes
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for leaf in content.split(r"<sep/>" ):
SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
SCREAMING_SNAKE_CASE__ : str = leaf[1:-2] # for categorical special tokens
output[key].append(_UpperCAmelCase )
if len(output[key] ) == 1:
SCREAMING_SNAKE_CASE__ : str = output[key][0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase )
if len(_UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, )
return self.image_processor_class
@property
def A_ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, )
return self.image_processor
| 663 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ = Lock()
def snake_case_ ( A_ : List[Any], A_ : Union[str, Any], A_ : Tuple, A_ : List[Any], A_ : str, A_ : List[Any], A_ : Union[str, Any] ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0, 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowerCamelCase : Optional[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowerCamelCase : str = min(A_, A_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowerCamelCase : List[Any] = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowerCamelCase : List[str] = max(A_, A_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Any = []
_lowerCamelCase : Any = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowerCamelCase : Dict = Pipe()
_lowerCamelCase : Optional[Any] = Pipe()
process_array_.append(
Process(
target=A_, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
_lowerCamelCase : Any = temp_rs
_lowerCamelCase : List[Any] = temp_rr
for i in range(1, len(A_ ) - 1 ):
_lowerCamelCase : Optional[Any] = Pipe()
_lowerCamelCase : Any = Pipe()
process_array_.append(
Process(
target=A_, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
_lowerCamelCase : Tuple = temp_rs
_lowerCamelCase : int = temp_rr
process_array_.append(
Process(
target=A_, args=(
len(A_ ) - 1,
arr[len(A_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A_ ) - 1],
), ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0, len(A_ ) ):
_lowerCamelCase : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = list(range(10, 0, -1 ) )
print('''Initial List''' )
print(*A_ )
_lowerCamelCase : List[Any] = odd_even_transposition(A_ )
print('''Sorted List\n''' )
print(*A_ )
if __name__ == "__main__":
main()
| 598 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase):
@parameterized.expand([(None,), ('''foo.json''',)] )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
_lowerCamelCase : int = GenerationConfig.from_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoConfig.from_pretrained('''gpt2''' )
_lowerCamelCase : List[Any] = GenerationConfig.from_model_config(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = GenerationConfig()
_lowerCamelCase : Any = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
_lowerCamelCase : Optional[Any] = copy.deepcopy(__lowerCAmelCase )
_lowerCamelCase : List[str] = generation_config.update(**__lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase , {'''foo''': '''bar'''} )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig()
_lowerCamelCase : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
_lowerCamelCase : Any = GenerationConfig.from_model_config(__lowerCAmelCase )
assert not hasattr(__lowerCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''test-generation-config''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
| 598 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = field(default=A , metadata={"help": "Whether to use SortishSampler or not."} )
lowerCAmelCase_ = field(
default=A , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowerCAmelCase_ = field(
default=A , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def snake_case ( self : List[str] ):
"""simple docstring"""
__lowercase =super().to_dict()
for k, v in d.items():
if isinstance(__lowercase , __lowercase ):
__lowercase =v.to_dict()
return d
| 119 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase ( enum.Enum ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = 1
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "generated"
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def snake_case ( self : Dict , __lowercase : Any=None , __lowercase : int=None , __lowercase : List[str]=None , __lowercase : int=None , __lowercase : str=None , __lowercase : List[Any]=None , **__lowercase : Union[str, Any] , ):
"""simple docstring"""
__lowercase ={}
if truncation is not None:
__lowercase =truncation
__lowercase =generate_kwargs
__lowercase ={}
if return_tensors is not None and return_type is None:
__lowercase =ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__lowercase =return_type
if clean_up_tokenization_spaces is not None:
__lowercase =clean_up_tokenization_spaces
if stop_sequence is not None:
__lowercase =self.tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
if len(__lowercase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowercase =stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case ( self : List[Any] , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
return True
def snake_case ( self : Optional[Any] , *__lowercase : Optional[int] , __lowercase : Union[str, Any] ):
"""simple docstring"""
__lowercase =self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , __lowercase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
__lowercase =([prefix + arg for arg in args[0]],)
__lowercase =True
elif isinstance(args[0] , __lowercase ):
__lowercase =(prefix + args[0],)
__lowercase =False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
__lowercase =self.tokenizer(*__lowercase , padding=__lowercase , truncation=__lowercase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Any , *__lowercase : int , **__lowercase : Optional[Any] ):
"""simple docstring"""
__lowercase =super().__call__(*__lowercase , **__lowercase )
if (
isinstance(args[0] , __lowercase )
and all(isinstance(__lowercase , __lowercase ) for el in args[0] )
and all(len(__lowercase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def snake_case ( self : List[str] , __lowercase : Tuple , __lowercase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **__lowercase : Dict ):
"""simple docstring"""
__lowercase =self._parse_and_tokenize(__lowercase , truncation=__lowercase , **__lowercase )
return inputs
def snake_case ( self : Optional[int] , __lowercase : Tuple , **__lowercase : Tuple ):
"""simple docstring"""
if self.framework == "pt":
__lowercase , __lowercase =model_inputs['input_ids'].shape
elif self.framework == "tf":
__lowercase , __lowercase =tf.shape(model_inputs['input_ids'] ).numpy()
__lowercase =generate_kwargs.get('min_length' , self.model.config.min_length )
__lowercase =generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(__lowercase , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
__lowercase =self.model.generate(**__lowercase , **__lowercase )
__lowercase =output_ids.shape[0]
if self.framework == "pt":
__lowercase =output_ids.reshape(__lowercase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__lowercase =tf.reshape(__lowercase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def snake_case ( self : str , __lowercase : Optional[Any] , __lowercase : Optional[int]=ReturnType.TEXT , __lowercase : Union[str, Any]=False ):
"""simple docstring"""
__lowercase =[]
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__lowercase ={f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
__lowercase ={
f'''{self.return_name}_text''': self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , )
}
records.append(__lowercase )
return records
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "summary"
def __call__( self : Dict , *__lowercase : Dict , **__lowercase : Dict ):
"""simple docstring"""
return super().__call__(*__lowercase , **__lowercase )
def snake_case ( self : str , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(A )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "translation"
def snake_case ( self : int , __lowercase : int , __lowercase : int , __lowercase : int ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def snake_case ( self : int , *__lowercase : str , __lowercase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , __lowercase : List[Any]=None , __lowercase : Optional[int]=None ):
"""simple docstring"""
if getattr(self.tokenizer , '_build_translation_inputs' , __lowercase ):
return self.tokenizer._build_translation_inputs(
*__lowercase , return_tensors=self.framework , truncation=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase )
else:
return super()._parse_and_tokenize(*__lowercase , truncation=__lowercase )
def snake_case ( self : Optional[int] , __lowercase : List[str]=None , __lowercase : Optional[Any]=None , **__lowercase : int ):
"""simple docstring"""
__lowercase , __lowercase , __lowercase =super()._sanitize_parameters(**__lowercase )
if src_lang is not None:
__lowercase =src_lang
if tgt_lang is not None:
__lowercase =tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__lowercase =kwargs.get('task' , self.task )
__lowercase =task.split('_' )
if task and len(__lowercase ) == 4:
# translation, XX, to YY
__lowercase =items[1]
__lowercase =items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[Any] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
return super().__call__(*__lowercase , **__lowercase )
| 119 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Optional[Any] = 1_00
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : List[Any] = image_size
__lowerCAmelCase : Optional[Any] = patch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Tuple = is_training
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : Optional[int] = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Dict = hidden_act
__lowerCAmelCase : str = hidden_dropout_prob
__lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = type_sequence_label_size
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : List[str] = scope
__lowerCAmelCase : int = out_indices
__lowerCAmelCase : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Dict = (image_size // patch_size) ** 2
__lowerCAmelCase : str = num_patches + 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Tuple = None
if self.use_labels:
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCamelCase ( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = BeitModel(config=a_ )
model.to(a_ )
model.eval()
__lowerCAmelCase : Dict = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.type_sequence_label_size
__lowerCAmelCase : Any = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
__lowerCAmelCase : List[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase : Any = 1
__lowerCAmelCase : str = BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
__lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : Optional[Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = self.num_labels
__lowerCAmelCase : List[Any] = BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__lowerCAmelCase : List[str] = model(a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__lowerCAmelCase : str = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
__lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __a , __a , unittest.TestCase):
A_ : Dict = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Optional[int] = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : int = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = BeitModelTester(self )
__lowerCAmelCase : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Any = model_class(a_ )
__lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def __lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
__lowerCAmelCase : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
__lowerCAmelCase : Dict = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__lowerCAmelCase : List[Any] = model(**a_ ).loss
loss.backward()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCAmelCase : Any = model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
__lowerCAmelCase : Any = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__lowerCAmelCase : int = model(**a_ ).loss
loss.backward()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : int = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def __lowerCamelCase ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def __lowerCAmelCase ():
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def __lowerCamelCase ( self ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(a_ )
__lowerCAmelCase : Dict = self.default_image_processor
__lowerCAmelCase : Dict = prepare_img()
__lowerCAmelCase : List[str] = image_processor(images=a_ , return_tensors='pt' ).pixel_values.to(a_ )
# prepare bool_masked_pos
__lowerCAmelCase : Optional[int] = torch.ones((1, 1_96) , dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : int = model(pixel_values=a_ , bool_masked_pos=a_ )
__lowerCAmelCase : Dict = outputs.logits
# verify the logits
__lowerCAmelCase : Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , a_ )
__lowerCAmelCase : Optional[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a_ , atol=1E-2 ) )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(a_ )
__lowerCAmelCase : List[Any] = self.default_image_processor
__lowerCAmelCase : Any = prepare_img()
__lowerCAmelCase : Any = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : int = model(**a_ )
__lowerCAmelCase : Optional[int] = outputs.logits
# verify the logits
__lowerCAmelCase : Tuple = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , a_ )
__lowerCAmelCase : Any = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1E-4 ) )
__lowerCAmelCase : str = 2_81
self.assertEqual(logits.argmax(-1 ).item() , a_ )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
a_ )
__lowerCAmelCase : int = self.default_image_processor
__lowerCAmelCase : Optional[Any] = prepare_img()
__lowerCAmelCase : Union[str, Any] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**a_ )
__lowerCAmelCase : Dict = outputs.logits
# verify the logits
__lowerCAmelCase : Tuple = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , a_ )
__lowerCAmelCase : Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1E-4 ) )
__lowerCAmelCase : List[str] = 23_96
self.assertEqual(logits.argmax(-1 ).item() , a_ )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__lowerCAmelCase : int = model.to(a_ )
__lowerCAmelCase : List[str] = BeitImageProcessor(do_resize=a_ , size=6_40 , do_center_crop=a_ )
__lowerCAmelCase : Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__lowerCAmelCase : Union[str, Any] = Image.open(ds[0]['file'] )
__lowerCAmelCase : List[Any] = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(**a_ )
__lowerCAmelCase : Union[str, Any] = outputs.logits
# verify the logits
__lowerCAmelCase : List[str] = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , a_ )
__lowerCAmelCase : Optional[int] = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
__lowerCAmelCase : Any = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=a_ , )
else:
__lowerCAmelCase : Optional[Any] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=a_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
__lowerCAmelCase : List[Any] = model.to(a_ )
__lowerCAmelCase : Tuple = BeitImageProcessor(do_resize=a_ , size=6_40 , do_center_crop=a_ )
__lowerCAmelCase : Union[str, Any] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
__lowerCAmelCase : str = Image.open(ds[0]['file'] )
__lowerCAmelCase : Tuple = image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(**a_ )
__lowerCAmelCase : Union[str, Any] = outputs.logits.detach().cpu()
__lowerCAmelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(5_00, 3_00)] )
__lowerCAmelCase : Optional[int] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , a_ )
__lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=a_ )
__lowerCAmelCase : List[str] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , a_ ) | 706 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = False ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = F"Expected string as input, found {type(_UpperCamelCase )}"
raise ValueError(_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = F"Expected boolean as use_pascal parameter, found {type(_UpperCamelCase )}"
raise ValueError(_UpperCamelCase )
__lowerCAmelCase : Tuple = input_str.split('_' )
__lowerCAmelCase : int = 0 if use_pascal else 1
__lowerCAmelCase : Any = words[start_index:]
__lowerCAmelCase : Any = [word[0].upper() + word[1:] for word in words_to_capitalize]
__lowerCAmelCase : Any = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod() | 549 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( lowercase__ : float, lowercase__ : float, lowercase__ : float ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119 |
'''simple docstring'''
def __UpperCamelCase ( lowercase__ : List[str], lowercase__ : Tuple ):
'''simple docstring'''
__lowercase =[0 for i in range(r + 1 )]
# nc0 = 1
__lowercase =1
for i in range(1, n + 1 ):
# to compute current row from previous row.
__lowercase =min(lowercase__, lowercase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 119 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_snake_case = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_snake_case = logging.get_logger(__name__)
def lowerCamelCase_ ( A : List[str] , A : Optional[int] ):
"""simple docstring"""
lowerCAmelCase_ = nn.functional.normalize(A )
lowerCAmelCase_ = nn.functional.normalize(A )
return torch.mm(A , normalized_text_embeds.t() )
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :Tuple = CLIPConfig
a :int = ['CLIPEncoderLayer']
def __init__( self , _UpperCAmelCase):
super().__init__(_UpperCAmelCase)
lowerCAmelCase_ = CLIPVisionModel(config.vision_config)
lowerCAmelCase_ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(17 , config.projection_dim) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(17) , requires_grad=_UpperCAmelCase)
lowerCAmelCase_ = nn.Parameter(torch.ones(3) , requires_grad=_UpperCAmelCase)
@torch.no_grad()
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.vision_model(_UpperCAmelCase)[1] # pooled_output
lowerCAmelCase_ = self.visual_projection(_UpperCAmelCase)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds).cpu().float().numpy()
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds).cpu().float().numpy()
lowerCAmelCase_ = []
lowerCAmelCase_ = image_embeds.shape[0]
for i in range(_UpperCAmelCase):
lowerCAmelCase_ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase_ = 0.0
for concept_idx in range(len(special_cos_dist[0])):
lowerCAmelCase_ = special_cos_dist[i][concept_idx]
lowerCAmelCase_ = self.special_care_embeds_weights[concept_idx].item()
lowerCAmelCase_ = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]})
lowerCAmelCase_ = 0.01
for concept_idx in range(len(cos_dist[0])):
lowerCAmelCase_ = cos_dist[i][concept_idx]
lowerCAmelCase_ = self.concept_embeds_weights[concept_idx].item()
lowerCAmelCase_ = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(_UpperCAmelCase)
result.append(_UpperCAmelCase)
lowerCAmelCase_ = [len(res['''bad_concepts''']) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.vision_model(_UpperCAmelCase)[1] # pooled_output
lowerCAmelCase_ = self.visual_projection(_UpperCAmelCase)
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.special_care_embeds)
lowerCAmelCase_ = cosine_distance(_UpperCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCAmelCase_ = torch.any(special_scores > 0 , dim=1)
lowerCAmelCase_ = special_care * 0.01
lowerCAmelCase_ = special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1])
lowerCAmelCase_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCAmelCase_ = torch.any(concept_scores > 0 , dim=1)
return images, has_nsfw_concepts
| 413 | 1 |
from math import isqrt
def UpperCamelCase ( snake_case__ : int ) -> list[int]:
UpperCamelCase : Union[str, Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , snake_case__ , snake_case__ ):
UpperCamelCase : List[str] = False
return [i for i in range(2 , snake_case__ ) if is_prime[i]]
def UpperCamelCase ( snake_case__ : int = 10**8 ) -> int:
UpperCamelCase : Union[str, Any] = calculate_prime_numbers(max_number // 2 )
UpperCamelCase : List[Any] = 0
UpperCamelCase : List[Any] = 0
UpperCamelCase : Tuple = len(snake_case__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 40 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = MobileBertTokenizer
__lowerCamelCase = MobileBertTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = filter_non_english
__lowerCamelCase = 'google/mobilebert-uncased'
def __UpperCAmelCase ( self ):
super().setUp()
UpperCAmelCase__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCAmelCase__ : List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __UpperCAmelCase ( self , _lowerCAmelCase ):
UpperCAmelCase__ : Tuple = """UNwant\u00E9d,running"""
UpperCAmelCase__ : Union[str, Any] = """unwanted, running"""
return input_text, output_text
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = self.tokenizer_class(self.vocab_file )
UpperCAmelCase__ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : Tuple = self.get_tokenizer()
UpperCAmelCase__ : Dict = self.get_rust_tokenizer()
UpperCAmelCase__ : List[str] = """UNwant\u00E9d,running"""
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase__ : Any = tokenizer.encode(_lowerCAmelCase )
UpperCAmelCase__ : str = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# With lower casing
UpperCAmelCase__ : Tuple = self.get_tokenizer(do_lower_case=_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.get_rust_tokenizer(do_lower_case=_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = """UNwant\u00E9d,running"""
UpperCAmelCase__ : int = tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase__ : Any = rust_tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : List[str] = tokenizer.encode(_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = rust_tokenizer.encode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Union[str, Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , strip_accents=_lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : List[Any] = BasicTokenizer(do_lower_case=_lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase__ : List[str] = {}
for i, token in enumerate(_lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = i
UpperCAmelCase__ : str = WordpieceTokenizer(vocab=_lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
UpperCAmelCase__ : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
UpperCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
UpperCAmelCase__ : Optional[Any] = tokenizer_r.encode_plus(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , )
UpperCAmelCase__ : Any = tokenizer_r.do_lower_case if hasattr(_lowerCAmelCase , """do_lower_case""" ) else False
UpperCAmelCase__ : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = ["""的""", """人""", """有"""]
UpperCAmelCase__ : Tuple = """""".join(_lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Any = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_r.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase__ : List[str] = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_lowerCAmelCase )
]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
| 79 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__magic_name__ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
if args.student_type == "roberta":
snake_case__ = False
elif args.student_type == "gpt2":
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
if args.student_type == "roberta":
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=_UpperCamelCase , required=_UpperCamelCase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=_UpperCamelCase , required=_UpperCamelCase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=_UpperCamelCase , choices=["distilbert", "roberta", "gpt2"] , required=_UpperCamelCase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=_UpperCamelCase , required=_UpperCamelCase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=_UpperCamelCase , type=_UpperCamelCase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_UpperCamelCase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=_UpperCamelCase , required=_UpperCamelCase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=_UpperCamelCase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=_UpperCamelCase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=_UpperCamelCase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=_UpperCamelCase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=_UpperCamelCase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=_UpperCamelCase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=_UpperCamelCase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=_UpperCamelCase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=_UpperCamelCase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=_UpperCamelCase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=_UpperCamelCase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=_UpperCamelCase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only." , )
parser.add_argument("--n_epoch" , type=_UpperCamelCase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=_UpperCamelCase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=_UpperCamelCase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=_UpperCamelCase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=_UpperCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5E-4 , type=_UpperCamelCase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1E-6 , type=_UpperCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=_UpperCamelCase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=_UpperCamelCase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=_UpperCamelCase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=_UpperCamelCase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=_UpperCamelCase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=_UpperCamelCase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=_UpperCamelCase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=_UpperCamelCase , default=4_000 , help="Checkpoint interval." )
snake_case__ = parser.parse_args()
sanity_checks(_UpperCamelCase )
# ARGS #
init_gpu_params(_UpperCamelCase )
set_seed(_UpperCamelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
" itUse `--force` if you want to overwrite it" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(_UpperCamelCase ) , _UpperCamelCase , indent=4 )
git_log(args.dump_path )
snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[args.student_type]
snake_case__ , snake_case__ , snake_case__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
snake_case__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
snake_case__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
snake_case__ = tokenizer.all_special_tokens.index(_UpperCamelCase )
snake_case__ = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
snake_case__ = special_tok_ids
snake_case__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , "rb" ) as fp:
snake_case__ = pickle.load(_UpperCamelCase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , "rb" ) as fp:
snake_case__ = pickle.load(_UpperCamelCase )
snake_case__ = np.maximum(_UpperCamelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
snake_case__ = 0.0 # do not predict special tokens
snake_case__ = torch.from_numpy(_UpperCamelCase )
else:
snake_case__ = None
snake_case__ = LmSeqsDataset(params=_UpperCamelCase , data=_UpperCamelCase )
logger.info("Data loader created." )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
snake_case__ = student_config_class.from_pretrained(args.student_config )
snake_case__ = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
snake_case__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=_UpperCamelCase )
else:
snake_case__ = student_model_class(_UpperCamelCase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info("Student loaded." )
# TEACHER #
snake_case__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_UpperCamelCase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_UpperCamelCase , _UpperCamelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_UpperCamelCase , _UpperCamelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
snake_case__ = Distiller(
params=_UpperCamelCase , dataset=_UpperCamelCase , token_probs=_UpperCamelCase , student=_UpperCamelCase , teacher=_UpperCamelCase )
distiller.train()
logger.info("Let\'s go get some drinks." )
if __name__ == "__main__":
main()
| 716 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = [False] * len(__lowerCAmelCase )
snake_case__ = [-1] * len(__lowerCAmelCase )
def dfs(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = True
snake_case__ = c
for u in graph[v]:
if not visited[u]:
dfs(__lowerCAmelCase , 1 - c )
for i in range(len(__lowerCAmelCase ) ):
if not visited[i]:
dfs(__lowerCAmelCase , 0 )
for i in range(len(__lowerCAmelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
__magic_name__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 530 | 0 |
def a__ ( ):
return [
a * b * (10_00 - a - b)
for a in range(1 ,9_99 )
for b in range(_UpperCamelCase ,9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"{solution() = }")
| 175 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {"""vocab_file""": """spiece.model"""}
a_ = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
a_ = {
"""AI-Sweden/gpt-sw3-126m""": 2_048,
"""AI-Sweden/gpt-sw3-350m""": 2_048,
"""AI-Sweden/gpt-sw3-1.6b""": 2_048,
"""AI-Sweden/gpt-sw3-6.7b""": 2_048,
"""AI-Sweden/gpt-sw3-20b""": 2_048,
}
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
__lowerCamelCase = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowerCamelCase = '''<|endoftext|>''' if eos_token is None else eos_token
__lowerCamelCase = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowerCamelCase = unk_token if pad_token is None else pad_token
__lowerCamelCase = eos_token if bos_token is None else bos_token
else:
__lowerCamelCase = '''<pad>''' if pad_token is None else pad_token
__lowerCamelCase = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
# Used for whitespace normalization in input texts
# fmt : off
__lowerCamelCase = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__lowerCamelCase = re.compile(
F"""[{"".join(map(__UpperCAmelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ):
'''simple docstring'''
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.non_printing_characters_re.sub('''''' , __UpperCAmelCase )
# Normalize whitespaces
__lowerCamelCase = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
__lowerCamelCase = unicodedata.normalize('''NFC''' , __UpperCAmelCase )
return text
def lowerCamelCase ( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCAmelCase )
@staticmethod
def lowerCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
return out_string
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
__lowerCamelCase = False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = self.preprocess_text(__UpperCAmelCase )
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase )
else:
__lowerCamelCase = [self.preprocess_text(__UpperCAmelCase ) for t in text]
__lowerCamelCase = self.sp_model.encode(__UpperCAmelCase )
if return_tensors is True or return_tensors == "pt":
__lowerCamelCase = torch.tensor(__UpperCAmelCase )
return token_ids
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.decode(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
__lowerCamelCase = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=__UpperCAmelCase )
| 175 | 1 |
import math
def _a ( lowerCamelCase__ ) -> int:
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ : int = F'Input value of [number={number}] must be an integer'
raise TypeError(lowerCamelCase__ )
if number < 1:
lowerCamelCase_ : List[Any] = F'Input value of [number={number}] must be > 0'
raise ValueError(lowerCamelCase__ )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCamelCase_ : Optional[int] = int(math.log(number // 3 , 2 ) ) + 2
lowerCamelCase_ : Any = [3, 5]
lowerCamelCase_ : Optional[int] = 2
lowerCamelCase_ : int = 3
for block in range(1 , lowerCamelCase__ ):
for _ in range(lowerCamelCase__ ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
UpperCamelCase = 0
try:
UpperCamelCase = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
| 144 |
from PIL import Image
def _a ( lowerCamelCase__ , lowerCamelCase__ ) -> Image:
def brightness(lowerCamelCase__ ) -> float:
return 1_28 + level + (c - 1_28)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(lowerCamelCase__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
UpperCamelCase = change_brightness(img, 1_0_0)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 144 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = '''unispeech-sat'''
def __init__( self , lowerCamelCase_=3_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase_=(1_0, 3, 3, 3, 3, 2, 2) , lowerCamelCase_=False , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=1_0 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=1_0 , lowerCamelCase_=0 , lowerCamelCase_=3_2_0 , lowerCamelCase_=2 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_0 , lowerCamelCase_=2_5_6 , lowerCamelCase_=2_5_6 , lowerCamelCase_=0.1 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_5_6 , lowerCamelCase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowerCamelCase_=(5, 3, 3, 1, 1) , lowerCamelCase_=(1, 2, 3, 1, 1) , lowerCamelCase_=5_1_2 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , lowerCamelCase_=5_0_4 , **lowerCamelCase_ , ) -> List[Any]:
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_)
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_norm
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = len(self.conv_dim)
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = num_clusters
UpperCamelCase = do_stable_layer_norm
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = apply_spec_augment
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCamelCase = num_codevectors_per_group
UpperCamelCase = num_codevector_groups
UpperCamelCase = contrastive_logits_temperature
UpperCamelCase = feat_quantizer_dropout
UpperCamelCase = num_negatives
UpperCamelCase = codevector_dim
UpperCamelCase = proj_codevector_dim
UpperCamelCase = diversity_loss_weight
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = list(lowerCamelCase_)
UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase__ ( self) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1) | 34 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'openbmb/cpm-ant-10b': 1024,
}
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = collections.OrderedDict()
with open(_lowercase ,'''r''' ,encoding='''utf-8''' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(_lowercase ):
UpperCamelCase = token.rstrip('''\n''' )
UpperCamelCase = index
return vocab
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_=2_0_0) -> Any:
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
UpperCamelCase = list(lowerCamelCase_)
if len(lowerCamelCase_) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(lowerCamelCase_):
UpperCamelCase = len(lowerCamelCase_)
UpperCamelCase = None
while start < end:
UpperCamelCase = ''''''.join(chars[start:end])
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(lowerCamelCase_)
UpperCamelCase = end
return sub_tokens
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
A_ = False
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<d>" , lowerCamelCase_="</d>" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<unk>" , lowerCamelCase_="</n>" , lowerCamelCase_="</_>" , lowerCamelCase_="left" , **lowerCamelCase_ , ) -> List[str]:
requires_backends(self , ['''jieba'''])
super().__init__(
bod_token=lowerCamelCase_ , eod_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , line_token=lowerCamelCase_ , space_token=lowerCamelCase_ , padding_side=lowerCamelCase_ , **lowerCamelCase_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(lowerCamelCase_)
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token)
@property
def UpperCAmelCase__ ( self) -> Dict:
return self.encoder[self.bod_token]
@property
def UpperCAmelCase__ ( self) -> str:
return self.encoder[self.eod_token]
@property
def UpperCAmelCase__ ( self) -> List[Any]:
return self.encoder["\n"]
@property
def UpperCAmelCase__ ( self) -> int:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = []
for x in jieba.cut(lowerCamelCase_ , cut_all=lowerCamelCase_):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase_))
return output_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_ , **lowerCamelCase_) -> Tuple:
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return token in self.encoder
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
return "".join(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
return self.decoder.get(lowerCamelCase_ , self.unk_token)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if os.path.isdir(lowerCamelCase_):
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
else:
UpperCamelCase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['''\n''']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase_: x[1]))
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''')
UpperCamelCase = token_index
writer.write(token + '''\n''')
index += 1
return (vocab_file,)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_)) + [1] + ([0] * len(lowerCamelCase_))
return [1] + ([0] * len(lowerCamelCase_)) | 34 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Any:
super().__init__()
lowercase__ = nn.Linear(3 , 4 )
lowercase__ = nn.BatchNormad(4 )
lowercase__ = nn.Linear(4 , 5 )
def snake_case_( self , _lowerCamelCase )-> Any:
return self.lineara(self.batchnorm(self.lineara(_lowerCamelCase ) ) )
class __A ( snake_case_ ):
"""simple docstring"""
def snake_case_( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )-> List[Any]:
return (args[0] + 1,) + args[1:], kwargs
class __A ( snake_case_ ):
"""simple docstring"""
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> str:
return output + 1
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> List[Any]:
lowercase__ = ModelForTest()
lowercase__ = ModelHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(test_model._hf_hook , _lowerCamelCase )
self.assertTrue(hasattr(_lowerCamelCase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(_lowerCamelCase )
self.assertFalse(hasattr(_lowerCamelCase , '''_hf_hook''' ) )
self.assertFalse(hasattr(_lowerCamelCase , '''_old_forward''' ) )
def snake_case_( self )-> int:
lowercase__ = ModelForTest()
lowercase__ = ModelHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase , append=_lowerCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(_lowerCamelCase , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(_lowerCamelCase )
self.assertFalse(hasattr(_lowerCamelCase , '''_hf_hook''' ) )
self.assertFalse(hasattr(_lowerCamelCase , '''_old_forward''' ) )
def snake_case_( self )-> Tuple:
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3 )
lowercase__ = test_model(x + 1 )
lowercase__ = test_model(x + 2 )
lowercase__ = PreForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase__ = PreForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase__ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 )
def snake_case_( self )-> Union[str, Any]:
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3 )
lowercase__ = test_model(_lowerCamelCase )
lowercase__ = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase__ = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowercase__ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , output + 2 , atol=1e-5 )
def snake_case_( self )-> List[str]:
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3 )
lowercase__ = test_model(_lowerCamelCase )
lowercase__ = PostForwardHook()
add_hook_to_module(_lowerCamelCase , _lowerCamelCase )
lowercase__ = test_model(_lowerCamelCase )
self.assertTrue(torch.allclose(_lowerCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowercase__ = True
lowercase__ = test_model(_lowerCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def snake_case_( self )-> Optional[int]:
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowerCamelCase , AlignDevicesHook(io_same_device=_lowerCamelCase ) )
lowercase__ = torch.randn(2 , 3 ).to(0 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def snake_case_( self )-> str:
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase__ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
lowercase__ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_lowerCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**_lowerCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def snake_case_( self )-> Optional[int]:
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase__ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , offload_buffers=_lowerCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def snake_case_( self )-> Optional[Any]:
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
lowercase__ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(_lowerCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , _lowerCamelCase )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_lowerCamelCase , execution_device=_lowerCamelCase , offload=_lowerCamelCase , weights_map=model.state_dict() , offload_buffers=_lowerCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
lowercase__ = torch.randn(2 , 3 )
lowercase__ = model(_lowerCamelCase )
self.assertEqual(output.device , _lowerCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowerCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __lowercase ( __lowerCAmelCase : dict , __lowerCAmelCase : str , __lowerCAmelCase : set , __lowerCAmelCase : set , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : PriorityQueue , __lowerCAmelCase : dict , __lowerCAmelCase : float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
a__ = cst_fwd.get(__lowerCAmelCase , np.inf )
a__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
a__ = new_cost_f
a__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
a__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict , __lowerCAmelCase : dict ):
a__ = -1
a__ = set()
a__ = set()
a__ = {source: 0}
a__ = {destination: 0}
a__ = {source: None}
a__ = {destination: None}
a__ = PriorityQueue()
a__ = PriorityQueue()
a__ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
a__ , a__ = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
a__ , a__ = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
a__ = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
a__ = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
a__ = shortest_distance
return shortest_path_distance
snake_case : str = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
snake_case : Any = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case : Dict = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
snake_case : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase = logging.get_logger(__name__)
# General docstring
UpperCAmelCase = """ResNetConfig"""
# Base docstring
UpperCAmelCase = """microsoft/resnet-50"""
UpperCAmelCase = [1, 2_048, 7, 7]
# Image classification docstring
UpperCAmelCase = """microsoft/resnet-50"""
UpperCAmelCase = """tiger cat"""
UpperCAmelCase = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ = 3 , a_ = 1 , a_ = "relu" ):
super().__init__()
a__ = nn.Convad(
a_ , a_ , kernel_size=a_ , stride=a_ , padding=kernel_size // 2 , bias=a_ )
a__ = nn.BatchNormad(a_ )
a__ = ACTaFN[activation] if activation is not None else nn.Identity()
def _a ( self , a_ ):
a__ = self.convolution(a_ )
a__ = self.normalization(a_ )
a__ = self.activation(a_ )
return hidden_state
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ ):
super().__init__()
a__ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
a__ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
a__ = config.num_channels
def _a ( self , a_ ):
a__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
a__ = self.embedder(a_ )
a__ = self.pooler(a_ )
return embedding
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ = 2 ):
super().__init__()
a__ = nn.Convad(a_ , a_ , kernel_size=1 , stride=a_ , bias=a_ )
a__ = nn.BatchNormad(a_ )
def _a ( self , a_ ):
a__ = self.convolution(a_ )
a__ = self.normalization(a_ )
return hidden_state
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ = 1 , a_ = "relu" ):
super().__init__()
a__ = in_channels != out_channels or stride != 1
a__ = (
ResNetShortCut(a_ , a_ , stride=a_ ) if should_apply_shortcut else nn.Identity()
)
a__ = nn.Sequential(
ResNetConvLayer(a_ , a_ , stride=a_ ) , ResNetConvLayer(a_ , a_ , activation=a_ ) , )
a__ = ACTaFN[activation]
def _a ( self , a_ ):
a__ = hidden_state
a__ = self.layer(a_ )
a__ = self.shortcut(a_ )
hidden_state += residual
a__ = self.activation(a_ )
return hidden_state
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ = 1 , a_ = "relu" , a_ = 4 ):
super().__init__()
a__ = in_channels != out_channels or stride != 1
a__ = out_channels // reduction
a__ = (
ResNetShortCut(a_ , a_ , stride=a_ ) if should_apply_shortcut else nn.Identity()
)
a__ = nn.Sequential(
ResNetConvLayer(a_ , a_ , kernel_size=1 ) , ResNetConvLayer(a_ , a_ , stride=a_ ) , ResNetConvLayer(a_ , a_ , kernel_size=1 , activation=a_ ) , )
a__ = ACTaFN[activation]
def _a ( self , a_ ):
a__ = hidden_state
a__ = self.layer(a_ )
a__ = self.shortcut(a_ )
hidden_state += residual
a__ = self.activation(a_ )
return hidden_state
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ , a_ , a_ , a_ = 2 , a_ = 2 , ):
super().__init__()
a__ = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
a__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(a_ , a_ , stride=a_ , activation=config.hidden_act ) , *[layer(a_ , a_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _a ( self , a_ ):
a__ = input
for layer in self.layers:
a__ = layer(a_ )
return hidden_state
class __snake_case ( nn.Module):
'''simple docstring'''
def __init__( self , a_ ):
super().__init__()
a__ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
a_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
a__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(a_ , config.depths[1:] ):
self.stages.append(ResNetStage(a_ , a_ , a_ , depth=a_ ) )
def _a ( self , a_ , a_ = False , a_ = True ):
a__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a__ = hidden_states + (hidden_state,)
a__ = stage_module(a_ )
if output_hidden_states:
a__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=a_ , hidden_states=a_ , )
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = ResNetConfig
UpperCamelCase__ : Union[str, Any] = """resnet"""
UpperCamelCase__ : Optional[int] = """pixel_values"""
UpperCamelCase__ : Tuple = True
def _a ( self , a_ ):
if isinstance(a_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(a_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _a ( self , a_ , a_=False ):
if isinstance(a_ , a_ ):
a__ = value
UpperCAmelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCAmelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" ,SCREAMING_SNAKE_CASE ,)
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ ):
super().__init__(a_ )
a__ = config
a__ = ResNetEmbeddings(a_ )
a__ = ResNetEncoder(a_ )
a__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , a_ , a_ = None , a_ = None ):
a__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = self.embedder(a_ )
a__ = self.encoder(
a_ , output_hidden_states=a_ , return_dict=a_ )
a__ = encoder_outputs[0]
a__ = self.pooler(a_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a_ , pooler_output=a_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" ,SCREAMING_SNAKE_CASE ,)
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ ):
super().__init__(a_ )
a__ = config.num_labels
a__ = ResNetModel(a_ )
# classification head
a__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , a_ = None , a_ = None , a_ = None , a_ = None , ):
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = self.resnet(a_ , output_hidden_states=a_ , return_dict=a_ )
a__ = outputs.pooler_output if return_dict else outputs[1]
a__ = self.classifier(a_ )
a__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a__ = """single_label_classification"""
else:
a__ = """multi_label_classification"""
if self.config.problem_type == "regression":
a__ = MSELoss()
if self.num_labels == 1:
a__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a__ = loss_fct(a_ , a_ )
elif self.config.problem_type == "single_label_classification":
a__ = CrossEntropyLoss()
a__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a__ = BCEWithLogitsLoss()
a__ = loss_fct(a_ , a_ )
if not return_dict:
a__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" ,SCREAMING_SNAKE_CASE ,)
class __snake_case ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE):
'''simple docstring'''
def __init__( self , a_ ):
super().__init__(a_ )
super()._init_backbone(a_ )
a__ = [config.embedding_size] + config.hidden_sizes
a__ = ResNetEmbeddings(a_ )
a__ = ResNetEncoder(a_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a_ )
@replace_return_docstrings(output_type=a_ , config_class=_CONFIG_FOR_DOC )
def _a ( self , a_ , a_ = None , a_ = None ):
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ = self.embedder(a_ )
a__ = self.encoder(a_ , output_hidden_states=a_ , return_dict=a_ )
a__ = outputs.hidden_states
a__ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
a__ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=a_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=a_ , )
| 351 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """spiece.model"""}
UpperCAmelCase = {
"""vocab_file""": {
"""bert_for_seq_generation""": (
"""https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
),
}
}
UpperCAmelCase = {"""bert_for_seq_generation""": 512}
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[int] = []
UpperCamelCase__ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self , a_ , a_="<s>" , a_="</s>" , a_="<unk>" , a_="<pad>" , a_="<::::>" , a_ = None , **a_ , ):
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , sep_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
a__ = vocab_file
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def _a ( self ):
return self.sp_model.get_piece_size()
def _a ( self ):
a__ = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self , a_ ):
a__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , a_ ):
return self.sp_model.encode(a_ , out_type=a_ )
def _a ( self , a_ ):
return self.sp_model.piece_to_id(a_ )
def _a ( self , a_ ):
a__ = self.sp_model.IdToPiece(a_ )
return token
def _a ( self , a_ ):
a__ = []
a__ = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
a__ = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def _a ( self , a_ , a_ = None ):
if not os.path.isdir(a_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , """wb""" ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 351 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class A__ ( A__ ):
A__ = ['image_processor', 'feature_extractor']
A__ = 'TvltImageProcessor'
A__ = 'TvltFeatureExtractor'
def __init__( self : int , _a : Tuple , _a : Dict ) -> Tuple:
'''simple docstring'''
super().__init__(image_processor=_a , feature_extractor=_a )
_SCREAMING_SNAKE_CASE =image_processor
_SCREAMING_SNAKE_CASE =feature_extractor
def __call__( self : List[str] , _a : str=None , _a : Any=None , _a : List[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=False , _a : str=False , *_a : Any , **_a : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
_SCREAMING_SNAKE_CASE =None
if images is not None:
_SCREAMING_SNAKE_CASE =self.image_processor(_a , mask_pixel=_a , *_a , **_a )
if images_mixed is not None:
_SCREAMING_SNAKE_CASE =self.image_processor(_a , is_mixed=_a , *_a , **_a )
if audio is not None:
_SCREAMING_SNAKE_CASE =self.feature_extractor(
_a , *_a , sampling_rate=_a , mask_audio=_a , **_a )
_SCREAMING_SNAKE_CASE ={}
if audio is not None:
output_dict.update(_a )
if images is not None:
output_dict.update(_a )
if images_mixed_dict is not None:
output_dict.update(_a )
return output_dict
@property
def A ( self : Tuple ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processor.model_input_names
_SCREAMING_SNAKE_CASE =self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 405 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase : Tuple = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase : List[Any] = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCamelCase : str = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def _lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =None
# source code of `config_class`
_SCREAMING_SNAKE_CASE =inspect.getsource(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =_re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
_SCREAMING_SNAKE_CASE =ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_SCREAMING_SNAKE_CASE =f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
_SCREAMING_SNAKE_CASE =ckpt_name
break
return checkpoint
def _lowerCAmelCase ( ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_SCREAMING_SNAKE_CASE =get_checkpoint_from_config_class(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_SCREAMING_SNAKE_CASE ='\n'.join(sorted(_UpperCamelCase ) )
raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 405 | 1 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def __a(SCREAMING_SNAKE_CASE_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase = np.max(SCREAMING_SNAKE_CASE_ , axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( __magic_name__ ):
def _snake_case ( self , **_lowerCAmelCase ) -> str:
_lowerCAmelCase = {}
if "second_text" in kwargs:
_lowerCAmelCase = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Tuple:
return self.tokenizer(_lowerCAmelCase , text_pair=_lowerCAmelCase , return_tensors=self.framework )
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
return self.model(**_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase = model_outputs.logits[0].numpy()
_lowerCAmelCase = softmax(_lowerCAmelCase )
_lowerCAmelCase = np.argmax(_lowerCAmelCase )
_lowerCAmelCase = self.model.config.idalabel[best_class]
_lowerCAmelCase = probabilities[best_class].item()
_lowerCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 706 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("Input value must be an 'int' type" )
_lowerCAmelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 489 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class lowerCamelCase__ :
def __init__( self : List[str] , _lowercase : Any = None ):
A = value
A = random()
A = None
A = None
def __repr__( self : Dict ):
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
def __str__( self : Any ):
A = str(self.value ) + " "
A = str(self.left or '' )
A = str(self.right or '' )
return value + left + right
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> tuple[Node | None, Node | None]:
"""simple docstring"""
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
A = split(root.left , __snake_case )
return left, root
else:
A = split(root.right , __snake_case )
return root, right
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Node | None:
"""simple docstring"""
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
A = merge(left.right , __snake_case )
return left
else:
A = merge(__snake_case , right.left )
return right
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Node | None:
"""simple docstring"""
A = Node(__snake_case )
A = split(__snake_case , __snake_case )
return merge(merge(__snake_case , __snake_case ) , __snake_case )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Node | None:
"""simple docstring"""
A = split(__snake_case , value - 1 )
A = split(__snake_case , __snake_case )
return merge(__snake_case , __snake_case )
def __snake_case ( UpperCamelCase__ ) -> None:
"""simple docstring"""
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> Node | None:
"""simple docstring"""
for arg in args.split():
if arg[0] == "+":
A = insert(__snake_case , int(arg[1:] ) )
elif arg[0] == "-":
A = erase(__snake_case , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def __snake_case ( ) -> None:
"""simple docstring"""
A = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
A = input()
while args != "q":
A = interact_treap(__snake_case , __snake_case )
print(__snake_case )
A = input()
print('good by!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 690 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
_A : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__ ( __snake_case : str ) -> List[str]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__snake_case ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def __magic_name__ ( __snake_case : str ) -> Optional[int]:
lowercase : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowercase : Optional[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
lowercase : Optional[int] = PipelineDataFormat.from_str(
format=__snake_case , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__snake_case , __snake_case )
class a__ ( a_ ):
def __init__( self , _a , _a ):
lowercase : List[str] = nlp
lowercase : Optional[Any] = reader
@staticmethod
def __magic_name__ ( _a ):
lowercase : Any = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=_a , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=_a , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=_a , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=_a , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=_a , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=_a , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=_a , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=_a , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=_a )
def __magic_name__ ( self ):
lowercase , lowercase : int = self._nlp, []
for entry in self._reader:
lowercase : str = nlp(**_a ) if self._reader.is_multi_columns else nlp(_a )
if isinstance(_a , _a ):
outputs.append(_a )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowercase : Dict = self._reader.save_binary(_a )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(_a )
| 361 | 0 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowercase_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
lowercase_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
lowercase_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def a__ (self , A , A , A=4 , A=False ) -> Tuple:
"""simple docstring"""
_a = compute_bleu(
reference_corpus=A , translation_corpus=A , max_order=A , smooth=A )
((_a) , (_a) , (_a) , (_a) , (_a) , (_a)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 352 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase_ = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Dict = 'facebook/nllb-200-distilled-600M'
__lowerCamelCase : Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__lowerCamelCase : Optional[int] = 'translator'
__lowerCamelCase : int = AutoTokenizer
__lowerCamelCase : List[Any] = AutoModelForSeqaSeqLM
__lowerCamelCase : int = LANGUAGE_CODES
__lowerCamelCase : Tuple = ['text', 'text', 'text']
__lowerCamelCase : Optional[Any] = ['text']
def a__ (self , A , A , A ) -> List[str]:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
_a = self.lang_to_code[src_lang]
_a = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
A , return_tensors='''pt''' , src_lang=A , tgt_lang=A )
def a__ (self , A ) -> Optional[Any]:
"""simple docstring"""
return self.model.generate(**A )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=A )
| 352 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """yolos"""
def __init__( self , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.0 , A_=0.0 , A_=0.02 , A_=1e-12 , A_=[512, 864] , A_=16 , A_=3 , A_=True , A_=100 , A_=True , A_=False , A_=1 , A_=5 , A_=2 , A_=5 , A_=2 , A_=0.1 , **A_ , ) ->Tuple:
'''simple docstring'''
super().__init__(**A_ )
__lowerCAmelCase : Optional[int] = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Any = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : Dict = layer_norm_eps
__lowerCAmelCase : Optional[int] = image_size
__lowerCAmelCase : List[str] = patch_size
__lowerCAmelCase : Union[str, Any] = num_channels
__lowerCAmelCase : int = qkv_bias
__lowerCAmelCase : Optional[int] = num_detection_tokens
__lowerCAmelCase : List[str] = use_mid_position_embeddings
__lowerCAmelCase : List[Any] = auxiliary_loss
# Hungarian matcher
__lowerCAmelCase : Dict = class_cost
__lowerCAmelCase : Dict = bbox_cost
__lowerCAmelCase : Tuple = giou_cost
# Loss coefficients
__lowerCAmelCase : List[Any] = bbox_loss_coefficient
__lowerCAmelCase : Union[str, Any] = giou_loss_coefficient
__lowerCAmelCase : Optional[int] = eos_coefficient
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase__ ( self ) ->float:
'''simple docstring'''
return 1e-4
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return 12
| 492 |
import string
def _lowercase ( lowercase__ ):
for key in range(len(string.ascii_uppercase ) ):
__lowerCAmelCase : Any = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
__lowerCAmelCase : Union[str, Any] = string.ascii_uppercase.find(lowercase__ )
__lowerCAmelCase : int = num - key
if num < 0:
__lowerCAmelCase : List[str] = num + len(string.ascii_uppercase )
__lowerCAmelCase : Union[str, Any] = translated + string.ascii_uppercase[num]
else:
__lowerCAmelCase : Dict = translated + symbol
print(f"""Decryption using Key #{key}: {translated}""" )
def _lowercase ( ):
__lowerCAmelCase : str = input('''Encrypted message: ''' )
__lowerCAmelCase : List[Any] = message.upper()
decrypt(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 492 | 1 |
def A_ ( A__ = 6008_5147_5143 ) -> int:
try:
a__ : Tuple = int(A__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
a__ : Optional[Any] = 2
a__ : Tuple = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
a__ : Dict = i
while n % i == 0:
a__ : Optional[int] = n // i
i += 1
return int(A__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 392 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : Any = logging.get_logger(__name__)
lowercase : Optional[int] = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Union[str, Any] = '''deformable_detr'''
__A : Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase=True , lowercase=None , lowercase=3 , lowercase=300 , lowercase=1024 , lowercase=6 , lowercase=1024 , lowercase=8 , lowercase=6 , lowercase=1024 , lowercase=8 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1.0 , lowercase=True , lowercase=False , lowercase="sine" , lowercase="resnet50" , lowercase=True , lowercase=False , lowercase=4 , lowercase=4 , lowercase=4 , lowercase=False , lowercase=300 , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=1 , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=0.1 , lowercase=0.25 , lowercase=False , **lowercase , ) -> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
a__ : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase , lowercase):
a__ : str = backbone_config.get('model_type')
a__ : int = CONFIG_MAPPING[backbone_model_type]
a__ : int = config_class.from_dict(lowercase)
a__ : Dict = use_timm_backbone
a__ : Union[str, Any] = backbone_config
a__ : str = num_channels
a__ : str = num_queries
a__ : Dict = max_position_embeddings
a__ : Any = d_model
a__ : int = encoder_ffn_dim
a__ : Dict = encoder_layers
a__ : List[str] = encoder_attention_heads
a__ : List[Any] = decoder_ffn_dim
a__ : int = decoder_layers
a__ : List[Any] = decoder_attention_heads
a__ : Dict = dropout
a__ : int = attention_dropout
a__ : Optional[Any] = activation_dropout
a__ : List[str] = activation_function
a__ : int = init_std
a__ : Tuple = init_xavier_std
a__ : Dict = encoder_layerdrop
a__ : Optional[int] = auxiliary_loss
a__ : str = position_embedding_type
a__ : Any = backbone
a__ : List[str] = use_pretrained_backbone
a__ : str = dilation
# deformable attributes
a__ : List[str] = num_feature_levels
a__ : str = encoder_n_points
a__ : Any = decoder_n_points
a__ : List[Any] = two_stage
a__ : List[Any] = two_stage_num_proposals
a__ : str = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
a__ : List[str] = class_cost
a__ : List[Any] = bbox_cost
a__ : str = giou_cost
# Loss coefficients
a__ : Union[str, Any] = mask_loss_coefficient
a__ : List[Any] = dice_loss_coefficient
a__ : Optional[int] = bbox_loss_coefficient
a__ : Tuple = giou_loss_coefficient
a__ : List[Any] = eos_coefficient
a__ : int = focal_alpha
a__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=lowercase , **lowercase)
@property
def __lowercase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __lowercase ( self) -> int:
'''simple docstring'''
return self.d_model
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
a__ : Any = self.backbone_config.to_dict()
a__ : List[str] = self.__class__.model_type
return output
| 392 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self: List[str] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Dict=13 , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: Optional[int]=3 , _lowerCAmelCase: List[str]=4 , _lowerCAmelCase: Any=[10, 20, 30, 40] , _lowerCAmelCase: Tuple=[2, 2, 3, 2] , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Any=True , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Optional[Any]=10 , _lowerCAmelCase: Optional[Any]=0.02 , _lowerCAmelCase: List[str]=["stage2", "stage3", "stage4"] , _lowerCAmelCase: List[Any]=[2, 3, 4] , _lowerCAmelCase: Any=None , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =num_stages
UpperCAmelCase_ =hidden_sizes
UpperCAmelCase_ =depths
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =num_labels
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =out_features
UpperCAmelCase_ =out_indices
UpperCAmelCase_ =scope
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_ =None
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case =True
_snake_case =False
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int ):
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ =self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =ConvNextModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase , __lowercase ):
_snake_case =(ConvNextBackbone,) if is_torch_available() else ()
_snake_case =ConvNextConfig
_snake_case =False
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModelTester(self )
| 54 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : List[str] , A__ : List[Any] , A__ : int=7 , A__ : Union[str, Any]=3 , A__ : List[str]=30 , A__ : Optional[int]=4_00 , A__ : Optional[Any]=True , A__ : Optional[int]=None , A__ : Optional[Any]=True , A__ : Any=[0.5, 0.5, 0.5] , A__ : int=[0.5, 0.5, 0.5] , A__ : Any=True , A__ : int=1 / 2_55 , A__ : List[str]=True , ) -> Dict:
'''simple docstring'''
snake_case_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case_ : Any = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : List[Any] = image_std
snake_case_ : Tuple = do_rescale
snake_case_ : Any = rescale_factor
snake_case_ : Optional[int] = do_pad
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
snake_case_ : Any = image_inputs[0]
if isinstance(A__ , Image.Image ):
snake_case_ ,snake_case_ : Dict = image.size
else:
snake_case_ ,snake_case_ : int = image.shape[1], image.shape[2]
if w < h:
snake_case_ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case_ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : List[Any] = self.size["shortest_edge"]
else:
snake_case_ : str = []
for image in image_inputs:
snake_case_ ,snake_case_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : List[Any] = max(A__ , key=lambda A__ : item[0] )[0]
snake_case_ : int = max(A__ , key=lambda A__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : Optional[Any] = json.loads(f.read() )
snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Any = json.loads(f.read() )
snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify masks
snake_case_ : Union[str, Any] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
# verify orig_size
snake_case_ : Dict = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : str = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 0 |
import requests
lowerCamelCase = """""" # <-- Put your OpenWeatherMap appid here!
lowerCamelCase = """https://api.openweathermap.org/data/2.5/"""
def SCREAMING_SNAKE_CASE( __UpperCamelCase = "Chicago" , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def SCREAMING_SNAKE_CASE( __UpperCamelCase = "Kolkata, India" , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def SCREAMING_SNAKE_CASE( __UpperCamelCase = 5_5.6_8 , __UpperCamelCase = 1_2.5_7 , __UpperCamelCase = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCamelCase = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 720 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class _a ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase = " " ):
"""simple docstring"""
a__ : List[Any] = sentence_delimiter
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
return list(__UpperCAmelCase )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : str = []
for sent_idx, sentence in enumerate(__UpperCAmelCase ):
chars.extend(self.process_string(__UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
lowerCamelCase = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
'''simple docstring'''
def _A ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
"""simple docstring"""
if concatenate_texts:
return jiwer.compute_measures(
__UpperCAmelCase , __UpperCAmelCase , truth_transform=__UpperCAmelCase , hypothesis_transform=__UpperCAmelCase , )["wer"]
a__ : Any = 0
a__ : int = 0
for prediction, reference in zip(__UpperCAmelCase , __UpperCAmelCase ):
a__ : Tuple = jiwer.compute_measures(
__UpperCAmelCase , __UpperCAmelCase , truth_transform=__UpperCAmelCase , hypothesis_transform=__UpperCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 207 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: SplitDict ):
SCREAMING_SNAKE_CASE__ = split_dict._to_yaml_list()
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = SplitDict._from_yaml_list(UpperCamelCase__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
SCREAMING_SNAKE_CASE__ = None
# the split name of split_dict takes over the name of the split info object
SCREAMING_SNAKE_CASE__ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase__ ), SplitInfo(dataset_name="""my_dataset""" )] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
SCREAMING_SNAKE_CASE__ = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 6 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__SCREAMING_SNAKE_CASE :int = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__SCREAMING_SNAKE_CASE :Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def UpperCAmelCase_ ( __lowercase : str , __lowercase : Union[str, Any]=100 , __lowercase : Dict=" " ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = text.split(__lowercase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowercase ) , __lowercase )]
def UpperCAmelCase_ ( __lowercase : dict ) -> dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowercase ):
titles.append(title if title is not None else "" )
texts.append(__lowercase )
return {"title": titles, "text": texts}
def UpperCAmelCase_ ( __lowercase : dict , __lowercase : DPRContextEncoder , __lowercase : DPRContextEncoderTokenizerFast ) -> dict:
'''simple docstring'''
_UpperCAmelCase = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__lowercase , padding="longest" , return_tensors="pt" )["input_ids"]
_UpperCAmelCase = ctx_encoder(input_ids.to(device=__lowercase ) , return_dict=__lowercase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ ( __lowercase : "RagExampleArguments" , __lowercase : "ProcessingArguments" , __lowercase : "IndexHnswArguments" , ) -> Any:
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase = dataset.map(__lowercase , batched=__lowercase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowercase )
_UpperCAmelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase = dataset.map(
partial(__lowercase , ctx_encoder=__lowercase , ctx_tokenizer=__lowercase ) , batched=__lowercase , batch_size=processing_args.batch_size , features=__lowercase , )
# And finally save your dataset
_UpperCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowercase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowercase )
# And save the index
_UpperCAmelCase = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowercase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A_ :
_lowerCamelCase : str = field(
default=str(Path(lowerCAmelCase_ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
_lowerCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
_lowerCamelCase : str = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
_lowerCamelCase : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
_lowerCamelCase : Optional[str] = field(
default=str(Path(lowerCAmelCase_ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class A_ :
_lowerCamelCase : Optional[int] = field(
default=lowerCAmelCase_ , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
_lowerCamelCase : int = field(
default=16 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class A_ :
_lowerCamelCase : int = field(
default=7_68 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
_lowerCamelCase : int = field(
default=1_28 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__SCREAMING_SNAKE_CASE :Tuple = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Tuple = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE :Optional[int] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 236 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCamelCase ( _snake_case : str ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase__ = model_type_to_module_name(_snake_case )
lowercase__ = importlib.import_module(f'''.{module_name}''' ,"transformers.models" )
try:
return getattr(_snake_case ,_snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_snake_case ,"__name__" ,_snake_case ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase__ = importlib.import_module("transformers" )
if hasattr(_snake_case ,_snake_case ):
return getattr(_snake_case ,_snake_case )
return None
def lowerCamelCase ( _snake_case : Union[str, os.PathLike] ,_snake_case : Optional[Union[str, os.PathLike]] = None ,_snake_case : bool = False ,_snake_case : bool = False ,_snake_case : Optional[Dict[str, str]] = None ,_snake_case : Optional[Union[bool, str]] = None ,_snake_case : Optional[str] = None ,_snake_case : bool = False ,**_snake_case : Any ,):
'''simple docstring'''
lowercase__ = get_file_from_repo(
_snake_case ,_snake_case ,cache_dir=_snake_case ,force_download=_snake_case ,resume_download=_snake_case ,proxies=_snake_case ,use_auth_token=_snake_case ,revision=_snake_case ,local_files_only=_snake_case ,)
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(_snake_case ,encoding="utf-8" ) as reader:
return json.load(_snake_case )
class snake_case :
def __init__( self ) -> int:
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase_ )
def _a ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ) -> str:
lowercase__ = kwargs.pop("config" ,UpperCAmelCase_ )
lowercase__ = kwargs.pop("trust_remote_code" ,UpperCAmelCase_ )
lowercase__ = True
lowercase__ , lowercase__ = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
lowercase__ = config_dict.get("image_processor_type" ,UpperCAmelCase_ )
lowercase__ = None
if "AutoImageProcessor" in config_dict.get("auto_map" ,{} ):
lowercase__ = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
lowercase__ = config_dict.pop("feature_extractor_type" ,UpperCAmelCase_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
lowercase__ = feature_extractor_class.replace("FeatureExtractor" ,"ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" ,{} ):
lowercase__ = config_dict["auto_map"]["AutoFeatureExtractor"]
lowercase__ = feature_extractor_auto_map.replace("FeatureExtractor" ,"ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
lowercase__ = AutoConfig.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
# It could be in `config.image_processor_type``
lowercase__ = getattr(UpperCAmelCase_ ,"image_processor_type" ,UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ ,"auto_map" ) and "AutoImageProcessor" in config.auto_map:
lowercase__ = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
lowercase__ = image_processor_class_from_name(UpperCAmelCase_ )
lowercase__ = image_processor_auto_map is not None
lowercase__ = image_processor_class is not None or type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING
lowercase__ = resolve_trust_remote_code(
UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
if has_remote_code and trust_remote_code:
lowercase__ = get_class_from_dynamic_module(
UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ )
lowercase__ = kwargs.pop("code_revision" ,UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING:
lowercase__ = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase_ )]
return image_processor_class.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _a ( UpperCAmelCase_ ,UpperCAmelCase_ ) -> int:
IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase_ ,UpperCAmelCase_ )
| 539 |
'''simple docstring'''
def lowerCamelCase ( _snake_case : List[str] ): # noqa: E741
'''simple docstring'''
lowercase__ = len(_snake_case )
lowercase__ = 0
lowercase__ = [0] * n
lowercase__ = [False] * n
lowercase__ = [False] * n
def dfs(_snake_case : Tuple ,_snake_case : int ,_snake_case : List[str] ,_snake_case : str ):
if parent == root:
out_edge_count += 1
lowercase__ = True
lowercase__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowercase__ = dfs(_snake_case ,_snake_case ,_snake_case ,_snake_case )
lowercase__ = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
lowercase__ = True
# AP found via cycle
if at == low[to]:
lowercase__ = True
else:
lowercase__ = min(low[at] ,_snake_case )
return out_edge_count
for i in range(_snake_case ):
if not visited[i]:
lowercase__ = 0
lowercase__ = dfs(_snake_case ,_snake_case ,-1 ,_snake_case )
lowercase__ = out_edge_count > 1
for x in range(len(_snake_case ) ):
if is_art[x] is True:
print(_snake_case )
# Adjacency list of graph
SCREAMING_SNAKE_CASE__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 539 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.