"""Image/Text processor class for FLAVA."""
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenizes `text` and/or preprocesses `images`, merging both outputs into a single encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
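# Usage sketch (not part of the original module): how the processor combines both
# sub-processors in practice. "facebook/flava-full" is the public FLAVA checkpoint;
# the image file name is a placeholder.
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")
#   # `inputs` now holds both `input_ids` from the tokenizer and `pixel_values`
#   # from the image processor, merged into one BatchEncoding.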
"""Tests for the PyTorch SqueezeBERT model."""
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
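# Usage sketch (not part of the test file): the integration test above corresponds
# roughly to this inference flow; using AutoTokenizer here is an assumption, since the
# test builds its input ids by hand.
#
#   from transformers import AutoTokenizer, SqueezeBertForSequenceClassification
#
#   tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#   inputs = tokenizer("A soccer game is happening.", "A sport is played.", return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 3): one score per MNLI class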
"""Project Euler problem 207: find the smallest value of k for which the proportion
of perfect partitions first falls below a given threshold."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition k is perfect when log2(sqrt(4k + 1) / 2 + 1 / 2) is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest k whose proportion of perfect partitions is below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
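# Why every accepted candidate is an integer of the form m * (m + 1): for odd
# integer = 2m + 1, (integer**2 - 1) / 4 = m * (m + 1), while even `integer` never
# yields a whole number. Such a k is "perfect" exactly when m + 1 is a power of two,
# since sqrt(4k + 1) / 2 + 1 / 2 = m + 1. The first perfect values are therefore
# k = 2, 12, 56, 240 (m = 1, 3, 7, 15). For example:
#
#   check_partition_perfect(12)  # True  (m + 1 = 4)
#   check_partition_perfect(6)   # False (m + 1 = 3)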
def is_power_of_two(number: int) -> bool:
    """Return True if the given non-negative integer is a power of two.

    >>> is_power_of_two(0)
    True
    >>> is_power_of_two(1)
    True
    >>> is_power_of_two(6)
    False
    >>> is_power_of_two(8)
    True
    >>> is_power_of_two(-1)
    Traceback (most recent call last):
        ...
    ValueError: number must not be negative
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
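# The bit trick works because a power of two has exactly one set bit, so subtracting 1
# flips that bit and sets every lower bit; ANDing the two leaves nothing:
#
#   8      -> 0b1000
#   8 - 1  -> 0b0111
#   8 & 7  -> 0b0000  (== 0, so 8 is a power of two)
#   6 & 5  -> 0b0100  (!= 0, so 6 is not)
#
# Edge case: 0 & -1 == 0, so this implementation reports 0 as a power of two.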
"""Aho-Corasick automaton for matching many keywords in a single pass over a string."""
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]):
        # adlist is the trie: node 0 is the root; each node stores its character
        # (`value`), child indices, failure link, and the keywords ending there.
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        # BFS over the trie: a node's fail link points to the longest proper suffix
        # of its path that is also a prefix of some keyword.
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # maps each found keyword to the list of its start indices
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
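# Usage sketch (not part of the original file): build the automaton once, then scan a
# string in a single pass; the keywords and text here are illustrative.
#
#   automaton = Automaton(["he", "she", "his", "hers"])
#   automaton.search_in("ushers")
#   # -> {'she': [1], 'he': [2], 'hers': [2]}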
"""Tests for the Flax Stable Diffusion pipeline."""
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny_model(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
"""Image processor class for Swin2SR."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Constructs a Swin2SR image processor: optional rescaling plus symmetric padding to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
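# Worked example for the `pad` rule above (not part of the module): each side grows to
# the next multiple of `pad_size`, and a full extra block is added even when the side is
# already a multiple, e.g. with pad_size = 8:
#
#   old_height = 21 -> pad_height = (21 // 8 + 1) * 8 - 21 = 3   (21 + 3 = 24)
#   old_height = 16 -> pad_height = (16 // 8 + 1) * 8 - 16 = 8   (16 + 8 = 24)
#
# Padding uses "symmetric" mode, mirroring edge pixels rather than zero-filling, which
# suits super-resolution inputs better than black borders.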
"""Benchmarking TensorFlow models in the transformers library."""
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)

if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
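# Usage sketch (not part of the module), assuming the public benchmark entry points in
# transformers; the model name and sizes are illustrative:
#
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = TensorFlowBenchmark(args).run()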
"""Scrape IMDb's Top 250 movies chart and write the titles and ratings to a CSV file."""
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
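# Note (not part of the original script): this scraper is tied to the markup of IMDb's
# Top 250 chart page at the time of writing; if IMDb renames the `titleColumn` /
# `ratingColumn` cells, both find_all calls return empty lists and the CSV will contain
# only the header row. A custom chart URL can also be passed through:
#
#   movies = get_imdb_top_250_movies("https://www.imdb.com/chart/top/?ref_=nv_mv_250")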
"""PyTorch Lightning callbacks for seq2seq training: checkpointing, early stopping, and metric logging."""
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
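# Wiring sketch (not part of the original file): how these callbacks are typically
# attached to a pytorch_lightning Trainer; `model` and `output_dir` are placeholders.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric="rouge2"),
#           get_early_stopping_callback(metric="rouge2", patience=3),
#       ],
#   )
#   trainer.fit(model)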
"""TensorFlow activation functions used across transformers models."""
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit, computed exactly with the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh-based GELU approximation, as used in the original GPT/BERT code."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10] for better quantization behavior."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits the input in two along `axis` and gates one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
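# Usage sketch (not part of the module): activations are looked up by name, e.g. from a
# model config's `hidden_act` field, and applied like any other TF function.
#
#   act_fn = get_tf_activation("gelu_new")
#   y = act_fn(tf.constant([-1.0, 0.0, 1.0]))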
def power(base: int, exponent: int) -> float:
    """Raise `base` to a non-negative `exponent` by recursion.

    >>> power(3, 4)
    81
    >>> power(2, 0)
    1
    """
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
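# The recursion unrolls as base * base * ... * 1 (`exponent` multiplications), e.g.
# power(3, 4) -> 3 * power(3, 3) -> 3 * 3 * power(3, 2) -> ... -> 81. Negative exponents
# are handled outside the function by inverting power(base, abs(exponent)), as the
# __main__ block above does; the recursive form alone would never terminate for
# exponent < 0.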
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowercase_ = 25_0004
lowercase_ = 25_0020
@require_sentencepiece
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = MBartTokenizer
A_ : Tuple = MBartTokenizerFast
A_ : Optional[int] = True
A_ : Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A = MBartTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = MBartTokenizer(_lowerCamelCase, keep_accents=_lowerCamelCase )
__A = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ), [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]], )
__A = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
], )
__A = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
__A = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
], )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__A = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__A = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
__A = self.tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__A = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO" )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR" )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
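# Illustrative usage sketch (not part of the test suite): the src_lang/tgt_lang
# conventions exercised above, used directly. This only restates what the tests
# assert; the example sentences are made up.
#
#     tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok(["A test"], text_target=["Un test"], return_tensors="pt")
#     # source ids end with [eos, EN_CODE]; target labels end with [eos, RO_CODE]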
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"""
    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.

    Args:
        title_sep (`str`, *optional*, defaults to `" / "`):
            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
        doc_sep (`str`, *optional*, defaults to `" // "`):
            Separator inserted between the text of the retrieved document and the original input when calling
            [`RagRetriever`].
        n_docs (`int`, *optional*, defaults to 5):
            Number of documents to retrieve.
        max_combined_length (`int`, *optional*, defaults to 300):
            Max length of contextualized input returned by [`~RagRetriever.__call__`].
        retrieval_vector_size (`int`, *optional*, defaults to 768):
            Dimensionality of the document embeddings indexed by [`RagRetriever`].
        retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
            by [`RagRetriever`].
        dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
            using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
            The path to the serialized faiss index on disk.
        passages_path (`str`, *optional*):
            A path to text passages compatible with the faiss index. Required if using
            [`~models.rag.retrieval_rag.LegacyIndex`].
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
            Whether to load a "dummy" variant of the dataset specified by `dataset`.
        label_smoothing (`float`, *optional*, defaults to 0.0):
            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
            in the loss calculation. If set to 0, no label smoothing is performed.
        do_marginalize (`bool`, *optional*, defaults to `False`):
            If `True`, the logits are marginalized over all documents by making use of
            `torch.nn.functional.log_softmax`.
        reduce_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
        do_deduplication (`bool`, *optional*, defaults to `True`):
            Whether or not to deduplicate the generations from different context documents for a given input. Has to be
            set to `False` if used while training with a distributed backend.
        exclude_bos_score (`bool`, *optional*, defaults to `False`):
            Whether or not to disregard the BOS token when computing the loss.
        output_retrieved (`bool`, *optional*, defaults to `False`):
            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask` are returned. See returned tensors for more detail.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        forced_eos_token_id (`int`, *optional*):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        exclude_bos_score=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        use_dummy_dataset=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
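# Minimal composition sketch (illustrative, not from this file): building a RagConfig
# out of two sub-configs. The checkpoint names below are assumptions chosen only to
# demonstrate the classmethod defined above.
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder_cfg, generator_cfg, n_docs=5)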
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value via recursive binary exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the tetration of `base` with the given `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
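# Sanity check (illustrative): _modexpt agrees with Python's built-in three-argument pow,
# e.g. _modexpt(3, 10, 1000) == pow(3, 10, 1000) == 49.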
if __name__ == "__main__":
print(f'''{solution() = }''')
from manim import *
class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.", font_size=24, )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
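# To render this scene (illustrative command; the filename and quality flags are assumed):
#   manim -pqh stage_3.py Stage3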
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
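# Lazy-import note (illustrative): with the _LazyModule registration above, e.g.
#   from transformers.models.convbert import ConvBertModel
# only triggers the actual import of modeling_convbert on first attribute access,
# keeping `import transformers` cheap when torch/tf extras are not needed.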
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
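# Example of the mapping performed by rename_keys, traced on an illustrative key:
#   "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"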
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
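# Example invocation (illustrative; the script filename and local paths are assumed):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti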
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
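# Illustrative checks: solution(10) == 2 + 8 == 10 and solution(34) == 2 + 8 + 34 == 44.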
if __name__ == "__main__":
print(f'''{solution() = }''')
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
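# Minimal usage sketch of the pipeline exercised above (assumes a CUDA device and a
# PIL image `low_res_image`; everything else matches the test constants):
#
#     import torch
#     from diffusers import StableDiffusionUpscalePipeline
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained(
#         "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]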
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self, dim, num_attention_heads, attention_head_dim, dropout=0.0, cross_attention_dim=None, activation_fn="geglu", num_embeds_ada_norm=None, attention_bias=False, only_cross_attention=False, double_self_attention=False, upcast_attention=False, norm_elementwise_affine=True, norm_type="layer_norm", final_dropout=False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size, dim):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, timestep=None, cross_attention_kwargs=None, class_labels=None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
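# Illustrative chunking arithmetic for the feed-forward branch above (shapes assumed):
# with norm_hidden_states of shape (2, 4096, 320), _chunk_dim=1 and _chunk_size=1024,
# the sequence axis splits into 4096 // 1024 = 4 chunks of shape (2, 1024, 320), each
# run through self.ff independently and re-concatenated along dim 1, trading compute
# scheduling for lower peak activation memory.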
class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
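# Worked example for get_expected_values (values illustrative): for a 300x600 (w x h)
# input with shortest_edge=18, w < h gives expected_width = 18 and
# expected_height = int(18 * 600 / 300) = 36; batched inputs take the max over each axis.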
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
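    # Note: the `boxes` verified above are in normalized (center_x, center_y,
    # width, height) format: absolute COCO (x, y, w, h) boxes are converted to
    # center coordinates and divided by the image width/height, which is why
    # every value lies in [0, 1].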
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
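    # Unlike the detection test above, the panoptic variant reads per-segment
    # masks from PNG files under `masks_path` (resolved via the annotation's
    # `file_name`) and checks them with a simple pixel-sum checksum instead of
    # an exact tensor comparison.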
| 135
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase :Union[str, Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """simple docstring"""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
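# Example with hypothetical values: handle_metrics("val", {"val_loss": 1.9321},
# training_args.output_dir) logs "***** val metrics *****" plus each key, then
# writes the dict to <output_dir>/val_results.json via save_json.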
def main():
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(lowerCamelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ : int = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
assert hasattr(lowerCamelCase__ , lowerCamelCase__ ), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A_ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCamelCase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
A_ : int = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCamelCase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Optional[Any] = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
A_ : List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCamelCase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
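    # freeze_params/assert_all_frozen come from the local utils module; a
    # minimal sketch of what the freeze amounts to (an assumption based on the
    # names, not a quote of the utils source):
    #   for param in model.get_encoder().parameters():
    #       param.requires_grad = False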
A_ : Union[str, Any] = SeqaSeqDataset
# Get datasets
A_ : Union[str, Any] = (
dataset_class(
lowerCamelCase__ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
A_ : int = (
dataset_class(
lowerCamelCase__ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
A_ : Tuple = (
dataset_class(
lowerCamelCase__ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
A_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , lowerCamelCase__ ) if training_args.predict_with_generate else None
)
A_ : List[str] = SeqaSeqTrainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , data_args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , data_collator=SeqaSeqDataCollator(
lowerCamelCase__ , lowerCamelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , )
A_ : str = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
A_ : List[Any] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
A_ : Any = train_result.metrics
A_ : str = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
A_ : Tuple = trainer.evaluate(metric_key_prefix="""val""" )
A_ : str = data_args.n_val
A_ : Optional[Any] = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
A_ : Any = trainer.predict(test_dataset=lowerCamelCase__ , metric_key_prefix="""test""" )
A_ : int = test_output.metrics
A_ : Tuple = data_args.n_test
if trainer.is_world_process_zero():
A_ : List[Any] = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowerCamelCase__ , training_args.output_dir )
all_metrics.update(lowerCamelCase__ )
if training_args.predict_with_generate:
A_ : List[Any] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
A_ : Tuple = lmap(str.strip , lowerCamelCase__ )
write_txt_file(lowerCamelCase__ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowerCamelCase__ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 135
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
UpperCAmelCase__ : Any = {
'camembert-base': 5_1_2,
}
UpperCAmelCase__ : int = '▁'
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs) -> None:
        """simple docstring"""
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
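    # e.g. a single sequence A is encoded as `<s> A </s>` and a pair (A, B) as
    # `<s> A </s></s> B </s>`, the RoBERTa-style double-separator layout.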
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
| 25
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A :
UpperCamelCase__ : Union[str, Any] =XGLMConfig
UpperCamelCase__ : Dict ={}
UpperCamelCase__ : Tuple ='gelu'
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=14 , lowercase_ : Dict=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=99 , lowercase_ : List[Any]=32 , lowercase_ : List[Any]=2 , lowercase_ : Dict=4 , lowercase_ : List[str]=37 , lowercase_ : int="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=512 , lowercase_ : Union[str, Any]=0.02 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict =parent
_lowerCamelCase : Optional[Any] =batch_size
_lowerCamelCase : Optional[int] =seq_length
_lowerCamelCase : Union[str, Any] =is_training
_lowerCamelCase : Tuple =use_input_mask
_lowerCamelCase : str =use_labels
_lowerCamelCase : Any =vocab_size
_lowerCamelCase : List[str] =d_model
_lowerCamelCase : List[Any] =num_hidden_layers
_lowerCamelCase : Union[str, Any] =num_attention_heads
_lowerCamelCase : List[Any] =ffn_dim
_lowerCamelCase : Optional[Any] =activation_function
_lowerCamelCase : Dict =activation_dropout
_lowerCamelCase : Tuple =attention_dropout
_lowerCamelCase : List[str] =max_position_embeddings
_lowerCamelCase : int =initializer_range
_lowerCamelCase : Optional[int] =None
_lowerCamelCase : Optional[Any] =0
_lowerCamelCase : List[str] =2
_lowerCamelCase : Any =1
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCamelCase : Any =None
if self.use_input_mask:
_lowerCamelCase : str =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] =self.get_config()
_lowerCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowercase_ , )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
return config, inputs_dict
@require_tf
class A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : List[str] =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Any =(
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : str =False
UpperCamelCase__ : int =False
UpperCamelCase__ : int =False
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =TFXGLMModelTester(self )
_lowerCamelCase : str =ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def lowerCamelCase ( self : str ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int =TFXGLMModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A ( unittest.TestCase ):
@slow
    def lowerCamelCase(self, verify_outputs: bool = True) -> Tuple:
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_lowerCamelCase : Tuple =tokenizer('Today is a nice day and' , return_tensors='tf' )
_lowerCamelCase : Optional[int] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_lowerCamelCase : List[Any] =model.generate(lowercase_ , do_sample=lowercase_ , seed=[7, 0] )
_lowerCamelCase : Union[str, Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowercase_ , lowercase_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
_lowerCamelCase : int =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_lowerCamelCase : List[Any] =tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
_lowerCamelCase : int =inputs['input_ids']
_lowerCamelCase : str =model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
_lowerCamelCase : Optional[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : List[str] =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : Tuple =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Dict =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : str =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
_lowerCamelCase : str =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : List[str] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
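# Why the batched-generation test sets padding_side = "left": decoder-only
# models continue from the final input position, so with right padding they
# would be asked to continue from pad tokens and the padded and unpadded
# generations could no longer match.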
| 199
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase :List[str] = logging.get_logger(__name__)
class a ( SequenceFeatureExtractor ):
"""simple docstring"""
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db=None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='''htk''', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='''slaney''', mel_scale='''slaney''', )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, '''hann'''), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='''dB''', )
return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='''bilinear''', align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
return mel_fusion
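    # The fused feature stacks four "channels": a bilinear global downsample of
    # the full mel spectrogram plus random crops from the front, middle and
    # back thirds, so long audio is summarized globally and sampled locally.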
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
return input_mel, longer
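    # "repeat" tiles the waveform once more than needed and cuts at max_length,
    # while "repeatpad" tiles a whole number of times and zero-pads the rest;
    # both branches only apply when the input is shorter than max_length.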
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
return input_features
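# Hedged usage sketch for the extractor class `a` defined above (the 12 s of
# silence is an illustrative placeholder, not a real input):
#   fe = a(sampling_rate=48_000)
#   out = fe(np.zeros(12 * 48_000), sampling_rate=48_000, return_tensors="np")
#   out["input_features"].shape  # (1, 4, frames, 64) under "fusion" truncation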
| 240
|
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
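# Worked example: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29; the
# inner loop strips the factors 5, 7 and 13, leaving n == 29 > 1 as the
# largest prime factor.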
if __name__ == "__main__":
print(f"""{solution() = }""")
| 240
| 1
|
from manim import *
class A ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
A__ = Rectangle(height=0.5,width=0.5 )
A__ = Rectangle(height=0.25,width=0.25 )
A__ = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('CPU',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('GPU',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Model',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
A__ = Rectangle(height=0.46 / 4,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.02,direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0],direction=lowercase_,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1],direction=lowercase_,buff=0.0 )
self.add(lowercase_ )
model_cpu_arr.append(lowercase_ )
self.add(*lowercase_,*lowercase_,*lowercase_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Loaded Checkpoint',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowercase_ )
A__ = []
A__ = []
for i, rect in enumerate(lowercase_ ):
A__ = fill.copy().set_fill(lowercase_,opacity=0.7 )
target.move_to(lowercase_ )
ckpt_arr.append(lowercase_ )
A__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowercase_ )
self.add(*lowercase_,*lowercase_ )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',font_size=1_8,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_,lowercase_ )
A__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint',font_size=1_8,)
blue_text.next_to(lowercase_,DOWN * 2.4,aligned_edge=key_text.get_left() )
self.add(lowercase_ )
A__ = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.',font_size=2_4,)
step_a.move_to([2, 2, 0] )
A__ = [meta_mem.copy() for i in range(6 )]
A__ = [meta_mem.copy() for i in range(6 )]
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 )
A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 )
A__ = Text('Disk',font_size=2_4 )
A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(lowercase_,run_time=3 ),Write(lowercase_,run_time=1 ),Create(lowercase_,run_time=1 ) )
A__ = []
for i, rect in enumerate(lowercase_ ):
A__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowercase_,run_time=1.5 ) )
self.play(*lowercase_ )
self.play(FadeOut(lowercase_ ) )
A__ = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.',font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_,run_time=3 ) )
self.play(
FadeOut(lowercase_,lowercase_,*lowercase_,*lowercase_ ),)
self.wait()
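# Manim note: the final loop above uses the generate_target()/MoveToTarget
# idiom: mutate mobject.target, then play MoveToTarget to animate the mobject
# into that state, which is how each checkpoint block slides onto the disk.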
| 7
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
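# The _LazyModule registered in sys.modules defers the heavy torch-dependent
# imports: attribute access on the module object triggers the real import, so
# `import ...swiftformer` stays cheap until e.g. SwiftFormerModel is used.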
| 252
| 0
|
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
lowercase__ : Tuple = '''1'''
lowercase__ : Any = '''0'''
lowercase__ : Any = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
predictions = {}
for iter in range(max_iters):
    predictions = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 10_00 / max_iters))
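# The reported figure is the mean per-iteration latency in milliseconds:
# elapsed_seconds * 1000 / max_iters. For example, 2000 iterations completed
# in 10.0 s would print "Average Inference Time = 5.000 ms".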
| 190
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
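# Project Euler 58 context: for a spiral of side length j + 2, the three new
# non-square corners are j*j + (j + 1), j*j + 2*(j + 1) and j*j + 3*(j + 1),
# which is exactly what range(j*j + j + 1, (j + 2) * (j + 2), j + 1)
# enumerates; the fourth corner (j + 2)**2 is a perfect square, never prime.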
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __UpperCamelCase :
def __init__( self , __a , ):
'''simple docstring'''
__a : List[str] = parent
__a : Union[str, Any] = 13
__a : Any = 7
__a : Optional[int] = True
__a : Tuple = True
__a : Any = False
__a : Optional[int] = True
__a : Optional[Any] = 99
__a : str = 32
__a : Union[str, Any] = 2
__a : Optional[Any] = 4
__a : Any = 37
__a : str = 'gelu'
__a : str = 0.1
__a : List[Any] = 0.1
__a : List[str] = 512
__a : Union[str, Any] = 16
__a : Union[str, Any] = 2
__a : Optional[Any] = 0.02
__a : str = 3
__a : List[Any] = 4
__a : Union[str, Any] = None
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : int = None
if self.use_input_mask:
__a : int = random_attention_mask([self.batch_size, self.seq_length] )
__a : Union[str, Any] = None
__a : Union[str, Any] = None
__a : Dict = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__a : int = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = TFDistilBertModel(config=__a )
__a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
__a : Union[str, Any] = model(__a )
__a : Dict = [input_ids, input_mask]
__a : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = TFDistilBertForMaskedLM(config=__a )
__a : str = {'input_ids': input_ids, 'attention_mask': input_mask}
__a : Tuple = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = TFDistilBertForQuestionAnswering(config=__a )
__a : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
__a : Any = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = self.num_labels
__a : Optional[Any] = TFDistilBertForSequenceClassification(__a )
__a : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
__a : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = self.num_choices
__a : Dict = TFDistilBertForMultipleChoice(__a )
__a : Any = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__a : Union[str, Any] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__a : List[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
__a : Optional[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = self.num_labels
__a : Union[str, Any] = TFDistilBertForTokenClassification(__a )
__a : str = {'input_ids': input_ids, 'attention_mask': input_mask}
__a : Union[str, Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A_ = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = TFDistilBertModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , dim=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__a : List[str] = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
__a : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a : int = model(__a )[0]
__a : str = [1, 6, 768]
self.assertEqual(output.shape , __a )
__a : List[Any] = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 27
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
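# The same zip-into-dict scraping pattern on a static HTML string, so the
# parsing logic can be exercised offline (the markup below is illustrative).
if __name__ == "__main__":
    demo_html = "<h1>Coronavirus Cases:</h1><div class='maincounter-number'>704,753,890</div>"
    demo_soup = BeautifulSoup(demo_html, "html.parser")
    demo_keys = demo_soup.findAll("h1")
    demo_values = demo_soup.findAll("div", {"class": "maincounter-number"})
    print({k.text.strip(): v.text.strip() for k, v in zip(demo_keys, demo_values)})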
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileViT image processor: optional shortest-edge resize, center crop,
    rescale, and RGB -> BGR channel flip (the pretrained checkpoints expect BGR input).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
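# A quick usage sketch for the processor above, run on a synthetic image so it
# needs no checkpoint: shortest-edge resize to 224, center-crop/pad to 256x256,
# rescale, and flip RGB -> BGR.
if __name__ == "__main__":
    from PIL import Image

    dummy_image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    processor = MobileViTImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 256, 256)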
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__UpperCAmelCase : List[str] = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
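# Example invocation (illustrative; the script file name is an assumption and
# the run needs TensorFlow, PyTorch and network access for weights and labels):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model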
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
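# A condensed standalone sketch of the three-stage DiffEdit flow the tests
# above exercise (mask generation -> inversion -> masked denoising); the
# checkpoint and prompts mirror the integration test, and a CUDA-capable
# machine is assumed.
if __name__ == "__main__":
    pipe = StableDiffusionDiffEditPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
    )
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
    pipe.enable_model_cpu_offload()

    raw_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
    ).convert("RGB").resize((768, 768))
    mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
    latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
    edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]
    edited.save("pears_edit.png")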
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
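# A quick round-trip sketch with the released checkpoint (network required),
# illustrating the SentencePiece behaviour the tests above assert on.
if __name__ == "__main__":
    released = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids = released.encode("I was born in 92000, and this is falsé.")
    print(released.convert_ids_to_tokens(ids))
    print(released.decode(ids))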
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Compute the Jaccard similarity |A ∩ B| / |A ∪ B| for two sets, lists or tuples.
    With alternative_union=True the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # keep order and duplicates from set_a, then append what is new in set_b
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
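    # Worked examples: identical two-element overlap, but the alternative union
    # (|A| + |B|) gives a smaller score because shared elements count twice.
    print(jaccard_similarity({1, 2, 3}, {2, 3, 4}))  # 2 / 4 = 0.5
    print(jaccard_similarity([1, 2, 3], [2, 3, 4]))  # 2 / 4 = 0.5
    print(jaccard_similarity({1, 2, 3}, {2, 3, 4}, alternative_union=True))  # 2 / 6 ≈ 0.333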
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z) in the Lambda-CDM model, from the density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")

    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** (1 / 2)
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
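    # Sanity check: at redshift 0 the density terms sum to one by construction,
    # so the function should return the Hubble constant itself.
    assert abs(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=0.3,
            dark_energy=0.7,
            redshift=0,
        )
        - 68.3
    ) < 1e-6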
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCAmelCase : str = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
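# A minimal end-to-end sketch of the `Accelerator` API re-exported above,
# using a toy linear model so it runs on a single CPU as-is (illustrative).
if __name__ == "__main__":
    import torch

    accelerator = Accelerator()
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    model, optimizer = accelerator.prepare(model, optimizer)
    for _ in range(3):
        x, y = torch.randn(8, 4), torch.randn(8, 1)
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
    print("toy training loop finished")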
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
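# A small sketch: instantiating the config parses `feed_forward_proj` into the
# derived activation fields, including the "gated-gelu" backward-compat path.
if __name__ == "__main__":
    cfg = T5Config(feed_forward_proj="gated-gelu")
    print(cfg.dense_act_fn, cfg.is_gated_act)  # gelu_new True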
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
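# A direct sketch of the behaviour under test: `Dataset.from_list` infers the
# schema from the records (requires the `datasets` package).
if __name__ == "__main__":
    demo = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    print(demo.column_names, demo[0])  # ['col_1', 'col_2'] {'col_1': 3, 'col_2': 'a'}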
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the integer-sided right triangles."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter that admits the most solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
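    # Cross-check against the classic Project Euler #39 example: perimeter 120
    # admits exactly three right triangles -- (20,48,52), (24,45,51), (30,40,50).
    assert pythagorean_triple(120)[120] == 3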
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to FP16 and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
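# Example invocation via `fire` (file name and paths are illustrative):
#
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin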
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image; positive levels increase contrast."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Scale each channel value away from (or toward) the midpoint 128."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
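    # Worked example of the transform: with level = 170 the factor is
    # 259 * 425 / (255 * 89) ≈ 4.85, so a mid-bright value of 150 maps to
    # int(128 + 4.85 * 22) = 234, while values below 128 are pushed toward 0.
    import numpy as np

    sample = Image.fromarray(np.full((2, 2), 150, dtype=np.uint8))
    print(list(change_contrast(sample, 170).getdata()))  # [234, 234, 234, 234]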
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
class Graph:
    def __init__(self):
        # dictionary of vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current node as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
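# An equivalent iterative depth-first traversal with an explicit stack, shown
# for comparison with the recursive helper above (an illustrative addition).
def dfs_iterative(adjacency: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # push neighbours in reverse so lower-numbered ones are visited first
            stack.extend(reversed(adjacency.get(node, [])))
    return order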
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
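# Illustration of the rewrite the sorter performs: a mapping such as
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("bert", "BertModel"),
#             ("albert", "AlbertModel"),
#         ]
#     )
#
# is emitted with its entries reordered by identifier ("albert" before "bert").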
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Tuple = """philschmid/bart-large-cnn-samsum"""
UpperCAmelCase : str = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
UpperCAmelCase : List[str] = """summarizer"""
UpperCAmelCase : List[Any] = AutoTokenizer
UpperCAmelCase : Any = AutoModelForSeqaSeqLM
UpperCAmelCase : Any = ["""text"""]
UpperCAmelCase : Optional[Any] = ["""text"""]
def _lowercase (self : Optional[Any] , _A : List[str]) -> Dict:
return self.pre_processor(_A , return_tensors='pt' , truncation=_A)
def _lowercase (self : List[str] , _A : Dict) -> int:
return self.model.generate(**_A)[0]
def _lowercase (self : Optional[int] , _A : int) -> Optional[Any]:
return self.pre_processor.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A)
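
# Usage sketch (assumes the transformers agents/tools runtime; the checkpoint
# is downloaded on first use). PipelineTool is expected to chain the
# encode -> forward -> decode methods defined above when the tool is called:
#
#   summarizer = TextSummarizationTool()
#   print(summarizer("Some long English text to condense ..."))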
"""simple docstring"""
import numpy
# List of input, output pairs
_a : Optional[int]= (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_a : int= (((515, 22, 13), 555), ((61, 35, 49), 150))
_a : List[Any]= [2, 4, 1, 5]
_a : Optional[int]= len(train_data)
_a : str= 0.0_0_9
def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]="train" ) -> int:
'''simple docstring'''
return calculate_hypothesis_value(UpperCAmelCase_ , UpperCAmelCase_ ) - output(
UpperCAmelCase_ , UpperCAmelCase_ )
def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Dict:
'''simple docstring'''
__snake_case : int = 0
for i in range(len(UpperCAmelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ) -> List[Any]:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=m ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = 0
for i in range(UpperCAmelCase_ ):
if index == -1:
summation_value += _error(UpperCAmelCase_ )
else:
summation_value += _error(UpperCAmelCase_ ) * train_data[i][0][index]
return summation_value
def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> str:
'''simple docstring'''
__snake_case : Dict = summation_of_cost_derivative(UpperCAmelCase_ , UpperCAmelCase_ ) / m
return cost_derivative_value
def __UpperCAmelCase ( ) -> List[str]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__snake_case : Dict = 0.000_002
__snake_case : Optional[int] = 0
__snake_case : str = 0
while True:
j += 1
__snake_case : Optional[int] = [0, 0, 0, 0]
for i in range(0 , len(UpperCAmelCase_ ) ):
__snake_case : Union[str, Any] = get_cost_derivative(i - 1 )
__snake_case : Any = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCAmelCase_ , UpperCAmelCase_ , atol=UpperCAmelCase_ , rtol=UpperCAmelCase_ , ):
break
__snake_case : Optional[Any] = temp_parameter_vector
print(('Number of iterations:', j) )
def __UpperCAmelCase ( ) -> int:
'''simple docstring'''
for i in range(len(UpperCAmelCase_ ) ):
print(('Actual output value:', output(UpperCAmelCase_ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(UpperCAmelCase_ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
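
# Worked example for the hypothesis above: with the initial parameter_vector
# [2, 4, 1, 5], the first training input (5, 2, 3) gives
# h(x) = 2 + 4*5 + 1*2 + 5*3 = 39, while the actual output is 15, so the
# initial error on example 0 is 39 - 15 = 24.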
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowerCamelCase__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __lowerCAmelCase (_UpperCamelCase ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return max(metric_fn(_UpperCamelCase , _UpperCamelCase ) for gt in ground_truths )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = [line.strip() for line in open(_UpperCamelCase , 'r' ).readlines()]
__lowerCAmelCase : List[Any] = []
if args.gold_data_mode == "qa":
__lowerCAmelCase : Optional[Any] = pd.read_csv(_UpperCamelCase , sep='\t' , header=_UpperCamelCase )
for answer_list in data[1]:
__lowerCAmelCase : Union[str, Any] = ast.literal_eval(_UpperCamelCase )
answers.append(_UpperCamelCase )
else:
__lowerCAmelCase : Optional[int] = [line.strip() for line in open(_UpperCamelCase , 'r' ).readlines()]
__lowerCAmelCase : Optional[int] = [[reference] for reference in references]
__lowerCAmelCase : int = 0
for prediction, ground_truths in zip(_UpperCamelCase , _UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
fa += metric_max_over_ground_truths(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = 100.0 * em / total
__lowerCAmelCase : str = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : str = args.k
__lowerCAmelCase : int = [line.strip() for line in open(_UpperCamelCase , 'r' ).readlines()]
__lowerCAmelCase : str = [line.strip() for line in open(_UpperCamelCase , 'r' ).readlines()]
__lowerCAmelCase : Tuple = 0
for hypo, reference in zip(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = set(hypo.split('\t' )[:k] )
__lowerCAmelCase : Optional[Any] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__lowerCAmelCase : str = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
def strip_title(_UpperCamelCase ):
if title.startswith('"' ):
__lowerCAmelCase : Any = title[1:]
if title.endswith('"' ):
__lowerCAmelCase : List[Any] = title[:-1]
return title
__lowerCAmelCase : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase , truncation=_UpperCamelCase , )['input_ids'].to(args.device )
__lowerCAmelCase : Optional[int] = rag_model.rag.question_encoder(_UpperCamelCase )
__lowerCAmelCase : Dict = question_enc_outputs[0]
__lowerCAmelCase : Dict = rag_model.retriever(
_UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
__lowerCAmelCase : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__lowerCAmelCase : Tuple = []
for docs in all_docs:
__lowerCAmelCase : int = [strip_title(_UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(_UpperCamelCase ) )
return provenance_strings
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
with torch.no_grad():
__lowerCAmelCase : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_UpperCamelCase , return_tensors='pt' , padding=_UpperCamelCase , truncation=_UpperCamelCase )
__lowerCAmelCase : Tuple = inputs_dict.input_ids.to(args.device )
__lowerCAmelCase : Any = inputs_dict.attention_mask.to(args.device )
__lowerCAmelCase : Dict = rag_model.generate( # rag_model overwrites generate
_UpperCamelCase , attention_mask=_UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__lowerCAmelCase : List[Any] = rag_model.retriever.generator_tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
if args.print_predictions:
for q, a in zip(_UpperCamelCase , _UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(_UpperCamelCase , _UpperCamelCase ) )
return answers
def __lowerCAmelCase ():
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=_UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=_UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=_UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=_UpperCamelCase , type=_UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=_UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=_UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=_UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=_UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=_UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=_UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=_UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=_UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=_UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
__lowerCAmelCase : Tuple = parser.parse_args()
__lowerCAmelCase : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = {}
if args.model_type is None:
__lowerCAmelCase : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
__lowerCAmelCase : Union[str, Any] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
__lowerCAmelCase : Tuple = args.n_docs
if args.index_name is not None:
__lowerCAmelCase : List[Any] = args.index_name
if args.index_path is not None:
__lowerCAmelCase : Tuple = args.index_path
else:
__lowerCAmelCase : List[Any] = BartForConditionalGeneration
__lowerCAmelCase : Any = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , _UpperCamelCase )
__lowerCAmelCase : str = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
__lowerCAmelCase : Optional[Any] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(_UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(_UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
__lowerCAmelCase : str = RagRetriever.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_UpperCamelCase , retriever=_UpperCamelCase , **_UpperCamelCase )
model.retriever.init_retrieval()
else:
__lowerCAmelCase : Dict = model_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
__lowerCAmelCase : Tuple = []
for line in tqdm(_UpperCamelCase ):
questions.append(line.strip() )
if len(_UpperCamelCase ) == args.eval_batch_size:
__lowerCAmelCase : Union[str, Any] = evaluate_batch_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
preds_file.write('\n'.join(_UpperCamelCase ) + '\n' )
preds_file.flush()
__lowerCAmelCase : Optional[Any] = []
if len(_UpperCamelCase ) > 0:
__lowerCAmelCase : List[str] = evaluate_batch_fn(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
preds_file.write('\n'.join(_UpperCamelCase ) )
preds_file.flush()
score_fn(_UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowerCamelCase__ = get_args()
main(args)
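
# Example invocation (sketch; the data paths are placeholders, not from the
# original script):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path e2e_preds.txt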
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
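
# Example invocation (sketch): convert only the canonical BERT checkpoints and
# keep the generated tokenizer.json files under ./fast_tokenizers
# ("BertTokenizer" is assumed to be one of the SLOW_TO_FAST_CONVERTERS keys):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --dump_path ./fast_tokenizers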
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
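
# Note (not from the original file): the @is_staging_test suite above targets
# the Hub staging endpoint and is skipped by default; in transformers' test
# setup it is typically enabled by exporting HUGGINGFACE_CO_STAGING=yes before
# invoking pytest.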
"""Fast tokenization classes for REALM."""

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
        ),
        'google/realm-cc-news-pretrained-encoder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
        ),
        'google/realm-cc-news-pretrained-scorer': (
            'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
        ),
        'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
        ),
        'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
        'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
        'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
        'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
        ),
        'google/realm-cc-news-pretrained-encoder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
        ),
        'google/realm-cc-news-pretrained-scorer': (
            'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
        ),
        'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
        ),
        'google/realm-orqa-nq-openqa': (
            'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
        ),
        'google/realm-orqa-nq-reader': (
            'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
        ),
        'google/realm-orqa-wq-openqa': (
            'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
        ),
        'google/realm-orqa-wq-reader': (
            'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/realm-cc-news-pretrained-embedder': 512,
    'google/realm-cc-news-pretrained-encoder': 512,
    'google/realm-cc-news-pretrained-scorer': 512,
    'google/realm-cc-news-pretrained-openqa': 512,
    'google/realm-orqa-nq-openqa': 512,
    'google/realm-orqa-nq-reader': 512,
    'google/realm-orqa-wq-openqa': 512,
    'google/realm-orqa-wq-reader': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
    'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
    'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
    'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
    'google/realm-orqa-nq-openqa': {'do_lower_case': True},
    'google/realm-orqa-nq-reader': {'do_lower_case': True},
    'google/realm-orqa-wq-openqa': {'do_lower_case': True},
    'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad candidates to max_length so they can be stacked into a batch.
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair', None)
        return_tensors = kwargs.pop('return_tensors', None)

        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get('input_ids')
            encoded_attention_mask = encoded_candidates.get('attention_mask')
            encoded_token_type_ids = encoded_candidates.get('token_type_ids')

            if encoded_input_ids is not None:
                output_data['input_ids'].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data['attention_mask'].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data['token_type_ids'].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
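
# Usage sketch for batch_encode_candidates (hypothetical inputs): every example
# holds a list of candidate texts, all padded to the same max_length so the
# result can be stacked into a (batch, num_candidates, max_length) tensor:
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["candidate 1", "candidate 2"]], max_length=10, return_tensors="pt"
#   )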
"""simple docstring"""
class lowerCamelCase :
'''simple docstring'''
def __init__(self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = """"""
UpperCAmelCase__ : str = """"""
UpperCAmelCase__ : Dict = []
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
UpperCAmelCase__ : Tuple = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
UpperCAmelCase__ : Any = self.__min_dist_top_down_dp(_lowerCamelCase , n - 1 )
UpperCAmelCase__ : Tuple = self.__min_dist_top_down_dp(m - 1 , _lowerCamelCase )
UpperCAmelCase__ : Tuple = self.__min_dist_top_down_dp(m - 1 , n - 1 )
UpperCAmelCase__ : Tuple = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self.dp[m][n]
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = worda
UpperCAmelCase__ : List[Any] = worda
UpperCAmelCase__ : Optional[int] = [[-1 for _ in range(len(_lowerCamelCase ) )] for _ in range(len(_lowerCamelCase ) )]
return self.__min_dist_top_down_dp(len(_lowerCamelCase ) - 1 , len(_lowerCamelCase ) - 1 )
def _a (self , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = worda
UpperCAmelCase__ : List[Any] = worda
UpperCAmelCase__ : Any = len(_lowerCamelCase )
UpperCAmelCase__ : List[str] = len(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
UpperCAmelCase__ : List[Any] = j
elif j == 0: # second string is empty
UpperCAmelCase__ : int = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
UpperCAmelCase__ : Optional[Any] = self.dp[i - 1][j - 1]
else:
UpperCAmelCase__ : Tuple = self.dp[i][j - 1]
UpperCAmelCase__ : Union[str, Any] = self.dp[i - 1][j]
UpperCAmelCase__ : List[str] = self.dp[i - 1][j - 1]
UpperCAmelCase__ : Any = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
_A = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
_A = input("""Enter the first string: """).strip()
_A = input("""Enter the second string: """).strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = BioGptTokenizer
SCREAMING_SNAKE_CASE = False
def _a (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCAmelCase__ : Union[str, Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(_lowerCamelCase ) )
def _a (self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = """lower newer"""
UpperCAmelCase__ : int = """lower newer"""
return input_text, output_text
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[str] = """lower"""
UpperCAmelCase__ : Optional[Any] = ["""low""", """er</w>"""]
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = tokens + ["""<unk>"""]
UpperCAmelCase__ : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
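
# How the toy vocabulary above tokenizes "lower": BPE applies the merges
# l+o -> "lo" and lo+w -> "low", then e+r</w> -> "er</w>" at the word end, so
# "lower" becomes ["low", "er</w>"] with ids 14 and 15, and "<unk>" maps to 20,
# matching the [14, 15, 20] expectation in test_full_tokenizer.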
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : str , lowercase_ : Optional[Any] , lowercase_ : int=13 , lowercase_ : Optional[Any]=10 , lowercase_ : int=3 , lowercase_ : str=2 , lowercase_ : Dict=2 , lowercase_ : Dict=2 , lowercase_ : Tuple=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=32 , lowercase_ : str=5 , lowercase_ : Optional[int]=4 , lowercase_ : int=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Dict=10 , lowercase_ : Dict=0.02 , lowercase_ : List[Any]=0.9 , lowercase_ : List[Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = parent
SCREAMING_SNAKE_CASE_ : Dict = batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE_ : str = num_channels
SCREAMING_SNAKE_CASE_ : Tuple = patch_size
SCREAMING_SNAKE_CASE_ : List[Any] = tubelet_size
SCREAMING_SNAKE_CASE_ : List[str] = num_frames
SCREAMING_SNAKE_CASE_ : List[Any] = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : int = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Tuple = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = mask_ratio
SCREAMING_SNAKE_CASE_ : Dict = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE_ : str = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE_ : Optional[int] = int(mask_ratio * self.seq_length)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = VideoMAEModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEForPreTraining(lowercase_)
model.to(lowercase_)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.ones((self.num_masks,))
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
SCREAMING_SNAKE_CASE_ : List[str] = mask.expand(self.batch_size , -1).bool()
SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , lowercase_)
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE_ : str = mask.sum().item()
SCREAMING_SNAKE_CASE_ : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__UpperCamelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEModelTester(self)
SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any=False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(lowercase_)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE_ : Tuple = torch.ones((self.model_tester.num_masks,))
SCREAMING_SNAKE_CASE_ : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
SCREAMING_SNAKE_CASE_ : Tuple = mask.expand(self.model_tester.batch_size , -1).bool()
SCREAMING_SNAKE_CASE_ : List[str] = bool_masked_pos.to(lowercase_)
if return_labels:
if model_class in [
*get_values(lowercase_),
]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[Any] = VideoMAEModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : str = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE_ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : Any = True
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : List[Any] = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE_ : str = len(lowercase_)
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_))
self.assertEqual(out_len + 1 , len(lowercase_))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : Any , lowercase_ : Tuple , lowercase_ : int):
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : str = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_) , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE_ : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
def _A () -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE_ : Any = np.load(__a )
return list(__a )
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''').to(
lowercase_)
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_video()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Dict = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_video()
SCREAMING_SNAKE_CASE_ : List[str] = image_processor(lowercase_ , return_tensors='''pt''').to(lowercase_)
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE_ : Dict = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.load(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 1408, 1536])
SCREAMING_SNAKE_CASE_ : str = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowercase_)
self.assertEqual(outputs.logits.shape , lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase_ , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.51_42] , device=lowercase_)
self.assertTrue(torch.allclose(outputs.loss , lowercase_ , atol=1e-4))
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE_ : Optional[int] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowercase_).to(
lowercase_)
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(torch.tensor([0.64_69]) , device=lowercase_)
self.assertTrue(torch.allclose(outputs.loss , lowercase_ , atol=1e-4))
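
# Shape sketch for the tester defaults above: image_size=10 and patch_size=2
# give (10 // 2) ** 2 = 25 patches per frame; num_frames=2 with tubelet_size=2
# give seq_length = (2 // 2) * 25 = 25 tokens, and mask_ratio=0.9 masks
# int(0.9 * 25) = 22 of them, so VideoMAEForPreTraining sees 3 visible tokens.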
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
UpperCAmelCase_ : List[Any] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self : List[Any] , a__ : Optional[int] , a__ : Optional[int]=13 , a__ : List[Any]=7 , a__ : Dict=True , a__ : Optional[Any]=True , a__ : List[Any]=True , a__ : Optional[Any]=True , a__ : Optional[int]=99 , a__ : Optional[Any]=32 , a__ : List[str]=5 , a__ : Any=4 , a__ : str=37 , a__ : Optional[int]="gelu" , a__ : Optional[Any]=0.1 , a__ : Dict=0.1 , a__ : Any=512 , a__ : Union[str, Any]=16 , a__ : Any=2 , a__ : Optional[int]=0.0_2 , a__ : Optional[int]=4 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_attention_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_choices
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_attention_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = True
__snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Any = True
A_ : Optional[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a (self : Dict ):
"""simple docstring"""
__snake_case = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a (self : List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=a__ )
__snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def a (self : str ):
"""simple docstring"""
__snake_case = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=a__ )
__snake_case = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__snake_case = model(a__ )[0]
__snake_case = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , a__ )
# compare the actual values for a slice.
__snake_case = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , a__ , atol=1E-4 ) )
@slow
def a (self : Any ):
"""simple docstring"""
__snake_case = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=a__ )
__snake_case = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__snake_case = model(a__ )[0]
# compare the actual values for a slice.
__snake_case = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , a__ , atol=1E-4 ) )
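# Added note: the slow tests above presumably rely on `from_pt=True` so that the
# PyTorch checkpoint andreasmadsen/efficient_mlm_m0.40 is converted to Flax weights
# on the fly at load time.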
| 24
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
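# Added note: the try/except above is the standard optional-dependency guard — if
# torch or transformers>=4.25.0 is missing, the real pipelines are replaced by dummy
# objects that raise a helpful error on first use instead of failing at import time.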
| 326
| 0
|
from math import pi
def arc_length(angle: float, radius: float) -> float:
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
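    # Added sanity check (a sketch): a 90-degree arc of radius 10 is a quarter of
    # the circumference, 2 * pi * 10 / 4 = 5 * pi ≈ 15.708.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9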
| 371
|
def solution():
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
    print(solution())
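    # Added note: this is Project Euler problem 19 — counting the Sundays that fell
    # on the first of the month during the twentieth century (1901-2000); the
    # expected answer is 171.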
| 152
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''SpeechT5FeatureExtractor'''
__lowerCAmelCase = '''SpeechT5Tokenizer'''
def __init__(self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__(self : int , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Dict ):
A = kwargs.pop("""audio""" , _lowerCAmelCase )
A = kwargs.pop("""text""" , _lowerCAmelCase )
A = kwargs.pop("""text_target""" , _lowerCAmelCase )
A = kwargs.pop("""audio_target""" , _lowerCAmelCase )
A = kwargs.pop("""sampling_rate""" , _lowerCAmelCase )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
A = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
elif text is not None:
A = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
else:
A = None
if audio_target is not None:
A = self.feature_extractor(audio_target=_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
A = targets["""input_values"""]
elif text_target is not None:
A = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
A = targets["""input_ids"""]
else:
A = None
if inputs is None:
return targets
if targets is not None:
A = labels
A = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
A = decoder_attention_mask
return inputs
def A (self : str , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
A = kwargs.pop("""input_values""" , _lowerCAmelCase )
A = kwargs.pop("""input_ids""" , _lowerCAmelCase )
A = kwargs.pop("""labels""" , _lowerCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
A = self.feature_extractor.pad(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
elif input_ids is not None:
A = self.tokenizer.pad(_lowerCAmelCase , **_lowerCAmelCase )
else:
A = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowerCAmelCase , _lowerCAmelCase ) and "input_ids" in labels[0]):
A = self.tokenizer.pad(_lowerCAmelCase , **_lowerCAmelCase )
A = targets["""input_ids"""]
else:
A = self.feature_extractor.feature_size
A = self.feature_extractor.num_mel_bins
A = self.feature_extractor.pad(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
A = feature_size_hack
A = targets["""input_values"""]
else:
A = None
if inputs is None:
return targets
if targets is not None:
A = labels
A = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
A = decoder_attention_mask
return inputs
def A (self : List[str] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def A (self : Union[str, Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Dict ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
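# Added note: __call__ above routes `audio`/`text` to the feature extractor or
# tokenizer as model inputs, routes `audio_target`/`text_target` through the same
# components as labels, and copies the targets' attention mask onto the inputs as
# `decoder_attention_mask` when both are present.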
| 258
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class __UpperCAmelCase ( A__ ):
'''simple docstring'''
__lowerCAmelCase = '''encodec'''
def __init__(self : List[Any] , _lowerCAmelCase : Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _lowerCAmelCase : List[str]=2_4000 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Dict=128 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : int=[8, 5, 4, 2] , _lowerCAmelCase : Any="weight_norm" , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=True , _lowerCAmelCase : str="reflect" , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : Tuple=1024 , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=True , **_lowerCAmelCase : List[str] , ):
A = target_bandwidths
A = sampling_rate
A = audio_channels
A = normalize
A = chunk_length_s
A = overlap
A = hidden_size
A = num_filters
A = num_residual_layers
A = upsampling_ratios
A = norm_type
A = kernel_size
A = last_kernel_size
A = residual_kernel_size
A = dilation_growth_rate
A = use_causal_conv
A = pad_mode
A = compress
A = num_lstm_layers
A = trim_right_ratio
A = codebook_size
A = codebook_dim if codebook_dim is not None else hidden_size
A = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_lowerCAmelCase )
@property
def A (self : List[Any] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def A (self : Union[str, Any] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def A (self : Any ):
A = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def A (self : List[str] ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
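# Added worked example: with the 24 kHz defaults above, the hop length is
# prod(upsampling_ratios) = 8 * 5 * 4 * 2 = 320 samples, so the frame_rate property
# yields ceil(24000 / 320) = 75 Hz and num_quantizers resolves to
# int(1000 * 24.0 // (75 * 10)) = 32 codebooks.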
| 258
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args)
if __name__ == "__main__":
main()
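# Added usage sketch: with the parser wired up as above, a shell invocation such as
#   accelerate launch train.py --num_processes 2
# resolves to the `func` that launch_command_parser attached to its subparser (the
# standard argparse set_defaults(func=...) pattern implied by the hasattr check).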
| 356
|
def hamming_distance(string_a: str, string_b: str) -> int:
    '''simple docstring'''
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!" )
    count = 0
    for char_a, char_b in zip(string_a , string_b):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
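    # Added example (a sketch): "karolin" and "kathrin" differ in three positions.
    print(hamming_distance("karolin", "kathrin"))  # -> 3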
| 191
| 0
|
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    '''simple docstring'''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
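    # Added example (a sketch): doomsday 2023 falls on a Tuesday (e.g. 8/8), so
    # 15 August 2023 is also a Tuesday.
    print(get_week_day(2023, 8, 15))  # -> 'Tuesday'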
| 207
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = """pixel_values"""
lowercase__ = False
lowercase__ = TimmBackboneConfig
def __init__( self : Tuple, lowerCamelCase : List[str], **lowerCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self, '''timm''' )
super().__init__(lowerCamelCase )
lowercase__ = config
if config.backbone is None:
raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase, '''out_features''' ) and config.out_features is not None:
raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
lowercase__ = getattr(lowerCamelCase, '''use_pretrained_backbone''', lowerCamelCase )
if pretrained is None:
raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
# We just take the final layer by default. This matches the default for the transformers models.
lowercase__ = config.out_indices if getattr(lowerCamelCase, '''out_indices''', lowerCamelCase ) is not None else (-1,)
lowercase__ = timm.create_model(
config.backbone, pretrained=lowerCamelCase, features_only=config.features_only, in_chans=config.num_channels, out_indices=lowerCamelCase, **lowerCamelCase, )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowercase__ = self._backbone.return_layers
lowercase__ = {layer['''module''']: str(lowerCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase )
@classmethod
def lowercase__ ( cls : List[str], lowerCamelCase : List[str], *lowerCamelCase : Optional[int], **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls, ['''vision''', '''timm'''] )
from ...models.timm_backbone import TimmBackboneConfig
lowercase__ = kwargs.pop('''config''', TimmBackboneConfig() )
lowercase__ = kwargs.pop('''use_timm_backbone''', lowerCamelCase )
if not use_timm:
raise ValueError('''use_timm_backbone must be True for timm backbones''' )
lowercase__ = kwargs.pop('''num_channels''', config.num_channels )
lowercase__ = kwargs.pop('''features_only''', config.features_only )
lowercase__ = kwargs.pop('''use_pretrained_backbone''', config.use_pretrained_backbone )
lowercase__ = kwargs.pop('''out_indices''', config.out_indices )
lowercase__ = TimmBackboneConfig(
backbone=lowerCamelCase, num_channels=lowerCamelCase, features_only=lowerCamelCase, use_pretrained_backbone=lowerCamelCase, out_indices=lowerCamelCase, )
return super()._from_config(lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : List[Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : int, lowerCamelCase : int, lowerCamelCase : Optional[int]=None, lowerCamelCase : List[Any]=None, lowerCamelCase : int=None, **lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowercase__ = self._all_layers
lowercase__ = self._backbone(lowerCamelCase, **lowerCamelCase )
lowercase__ = self._return_layers
lowercase__ = tuple(hidden_states[i] for i in self.out_indices )
else:
lowercase__ = self._backbone(lowerCamelCase, **lowerCamelCase )
lowercase__ = None
lowercase__ = tuple(lowerCamelCase )
lowercase__ = tuple(lowerCamelCase ) if hidden_states is not None else None
if not return_dict:
lowercase__ = (feature_maps,)
if output_hidden_states:
lowercase__ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase, hidden_states=lowerCamelCase, attentions=lowerCamelCase )
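# Added usage sketch (the public names TimmBackbone/TimmBackboneConfig are assumed
# from the imports above; not a verbatim example from this file):
#
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per stage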
| 207
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=36 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_12 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=10_00 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = text_seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ = text_seq_length
snake_case_ = (image_size // patch_size) ** 2 + 1
snake_case_ = self.text_seq_length + self.image_seq_length
def UpperCamelCase__ ( self ):
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = LayoutLMvaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# text + image
snake_case_ = model(_UpperCAmelCase , pixel_values=_UpperCAmelCase )
snake_case_ = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
snake_case_ = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
snake_case_ = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ = model(pixel_values=_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = LayoutLMvaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__snake_case = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCamelCase__ ( self ):
snake_case_ = LayoutLMvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = copy.deepcopy(_UpperCAmelCase )
if model_class in get_values(_UpperCAmelCase ):
snake_case_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_UpperCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
snake_case_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in get_values(_UpperCAmelCase ):
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in [
*get_values(_UpperCAmelCase ),
]:
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in [
*get_values(_UpperCAmelCase ),
]:
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_UpperCAmelCase , )
return inputs_dict
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
@slow
def UpperCamelCase__ ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LayoutLMvaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ()-> Dict:
"""simple docstring"""
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
return LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ):
snake_case_ = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).pixel_values.to(_UpperCAmelCase )
snake_case_ = torch.tensor([[1, 2]] )
snake_case_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ = model(
input_ids=input_ids.to(_UpperCAmelCase ) , bbox=bbox.to(_UpperCAmelCase ) , pixel_values=pixel_values.to(_UpperCAmelCase ) , )
# verify the logits
snake_case_ = torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , _UpperCAmelCase )
snake_case_ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
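# Added note: the (1, 199, 768) hidden-state shape in the integration test is 2 text
# tokens plus (224 / 16) ** 2 = 196 image patches plus one extra visual token —
# 2 + 197 = 199 positions at the base model's 768-dim hidden size.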
| 365
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = StableDiffusionInpaintPipeline
__snake_case = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__snake_case = frozenset([] )
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
snake_case_ = CLIPTextModel(_UpperCAmelCase )
snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
snake_case_ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_UpperCAmelCase ).startswith('''mps''' ):
snake_case_ = torch.manual_seed(_UpperCAmelCase )
else:
snake_case_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self ):
snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
snake_case_ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case_ = self.get_dummy_inputs(_UpperCAmelCase )
snake_case_ = sd_pipe(**_UpperCAmelCase ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = PNDMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , scheduler=_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
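# Added note: the three slow tests above run the same inpainting prompt in float32,
# float16, and float16 with attention slicing plus sequential CPU offload — which is
# why the last one can assert that peak CUDA memory stays below ~2.65 GB.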
| 267
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["GLPNFeatureExtractor"]
SCREAMING_SNAKE_CASE__ = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
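# Added note: with this _LazyModule pattern, importing the glpn subpackage stays
# cheap — torch- and vision-backed symbols are only imported when an attribute such
# as GLPNForDepthEstimation is first accessed (or eagerly under TYPE_CHECKING).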
| 46
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 42
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
@register_to_config
def __init__( self , lowercase = 3 , lowercase = 3 , lowercase = ("DownEncoderBlock2D",) , lowercase = ("UpDecoderBlock2D",) , lowercase = (64,) , lowercase = 1 , lowercase = "silu" , lowercase = 3 , lowercase = 32 , lowercase = 256 , lowercase = 32 , lowercase = None , lowercase = 0.18_215 , lowercase = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase = nn.Convad(lowercase , lowercase , 1 )
lowerCAmelCase = VectorQuantizer(lowercase , lowercase , beta=0.25 , remap=lowercase , sane_index_shape=lowercase )
lowerCAmelCase = nn.Convad(lowercase , lowercase , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , norm_type=lowercase , )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = True ) -> VQEncoderOutput:
lowerCAmelCase = self.encoder(lowercase )
lowerCAmelCase = self.quant_conv(lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = False , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.quantize(lowercase )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(lowercase )
lowerCAmelCase = self.decoder(lowercase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
def _snake_case ( self , lowercase , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase = sample
lowerCAmelCase = self.encode(lowercase ).latents
lowerCAmelCase = self.decode(lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
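# Added round-trip sketch (the class name VQModel and a tiny config are assumed;
# not part of the original file):
#
#   vq = VQModel(block_out_channels=(32,), norm_num_groups=32)
#   x = torch.randn(1, 3, 32, 32)
#   rec = vq(x).sample              # encode -> vector-quantize -> decode
#   latents = vq.encode(x).latents  # pre-quantization latents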
| 46
| 1
|
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rot_mat = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, rot_mat, (rows, cols))
if __name__ == "__main__":
# read original image
UpperCAmelCase = cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
UpperCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
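# Added note: getAffineTransform solves for the unique 2x3 matrix that maps the
# three source points onto the three destination points; warpAffine then applies
# that matrix to every pixel, so each (src, dst) point pair defines one warp.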
| 54
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
# Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(a__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(a__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(a__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0]), len(cells)) )
        pixels = img.load()
# Save cells to image
for x in range(len(a__ ) ):
for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
# Save image
        images.append(img)
        cells = new_generation(cells)
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
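    # Added check (a sketch): one generation flips the vertical BLINKER bar into a
    # horizontal one.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]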
| 54
| 1
|
# Imports
import numpy as np
class A__ :
def __init__( self , A_=None , A_=None , A_=None , A_=None , A_=None ):
'''simple docstring'''
self.set_matricies(red=A_ , green=A_ , blue=A_ , red_edge=A_ , nir=A_ )
def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None ):
'''simple docstring'''
if red is not None:
UpperCamelCase : Tuple = red
if green is not None:
UpperCamelCase : List[str] = green
if blue is not None:
UpperCamelCase : Tuple = blue
if red_edge is not None:
UpperCamelCase : List[Any] = red_edge
if nir is not None:
UpperCamelCase : Dict = nir
return True
def __UpperCamelCase( self , A_="" , A_=None , A_=None , A_=None , A_=None , A_=None ):
'''simple docstring'''
self.set_matricies(red=A_ , green=A_ , blue=A_ , red_edge=A_ , nir=A_ )
UpperCamelCase : Any = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def __UpperCamelCase( self ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __UpperCamelCase( self ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __UpperCamelCase( self ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def __UpperCamelCase( self ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __UpperCamelCase( self , A_=0.08 , A_=1.22 , A_=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __UpperCamelCase( self ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir / self.green) - 1
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __UpperCamelCase( self ):
'''simple docstring'''
return self.nir - self.green
def __UpperCamelCase( self ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def __UpperCamelCase( self , A_=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def __UpperCamelCase( self , A_=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __UpperCamelCase( self ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __UpperCamelCase( self , A_=None , A_=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def __UpperCamelCase( self ):
'''simple docstring'''
return self.nir / self.red
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def __UpperCamelCase( self ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __UpperCamelCase( self ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def __UpperCamelCase( self ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def __UpperCamelCase( self ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
UpperCamelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __UpperCamelCase( self ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __UpperCamelCase( self ):
'''simple docstring'''
return self.nir / self.red
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def __UpperCamelCase( self ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 52
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, """w""" ) as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element) -> Optional[Tuple]:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
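# Worked example: get_tokens("a b c") == {"a", "b", "c"} and
# get_tokens("b c d") == {"b", "c", "d"}, so the Jaccard similarity is
# |{b, c}| / |{a, b, c, d}| == 2 / 4 == 0.5.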
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f , cluster_list ) ,
            total=len(cluster_list ) ,
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(f'''Original dataset size: {len(dataset )}''' )
    print(f'''Number of duplicate clusters: {len(duplicate_clusters )}''' )
    print(f'''Files in duplicate cluster: {len(duplicate_indices )}''' )
    print(f'''Unique files in duplicate cluster: {len(extreme_dict )}''' )
    print(f'''Filtered dataset size: {len(ds_filter )}''' )
    return ds_filter, duplicate_clusters
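# A hedged usage sketch (column values are illustrative; the helpers above
# expect a `datasets.Dataset` with "content", "repo_name" and "path" columns,
# and snippets with fewer than MIN_NUM_TOKENS distinct tokens are never indexed):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({
#         "content": ["def add(a, b):\n    return a + b"] * 2 + ["print('hi')"],
#         "repo_name": ["repo1", "repo2", "repo3"],
#         "path": ["a.py", "b.py", "c.py"],
#     })
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)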
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name) -> str:
    """simple docstring"""
    if "cls_token" in name:
        name = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
    if "mask_token" in name:
        name = name.replace('''mask_token''' , '''decoder.mask_token''' )
    if "decoder_pos_embed" in name:
        name = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
    if "decoder_blocks" in name:
        name = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''vit.encoder.layer''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "decoder_embed" in name:
        name = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
    if "decoder_norm" in name:
        name = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
    if "decoder_pred" in name:
        name = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
    return name
def convert_state_dict(orig_state_dict, config) -> dict:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = '''decoder.decoder_layers.'''
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = '''vit.encoder.layer.'''
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """simple docstring"""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1E-4 )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
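# Example invocation (the checkpoint URL is the parser default above; the
# script filename and output path are hypothetical):
#
#     python convert_vit_mae_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base-converted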
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
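# With the lazy structure above, importing a TrOCR class from the package only
# materializes the underlying submodule on first attribute access. A hedged
# usage sketch (the checkpoint id is illustrative):
#
#     from transformers import TrOCRProcessor  # resolved through _LazyModule
#     processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")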
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
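# Quick worked example of the ideal gas law PV = nRT (values illustrative):
# 1 mol at 273.15 K in 0.0224 m^3 gives
# P = 1 * 273.15 * 8.314462 / 0.0224 ≈ 1.01e5 Pa, i.e. roughly 1 atm.
#
#     print(pressure_of_gas_system(1.0, 273.15, 0.0224))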
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""MobileViTFeatureExtractor"""]
UpperCamelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
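# Same lazy-import pattern as the TrOCR module above, with the torch and TF
# model families gated separately on is_torch_available() / is_tf_available().
# Hedged sketch (torch path):
#
#     from transformers import MobileViTImageProcessor, MobileViTModel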
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str):
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(r"^(.*)_\d+\.jpg$" , stem ).groups()[0]
class PetsDataset(Dataset ):
    """simple docstring"""
    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        """simple docstring"""
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__( self ):
        """simple docstring"""
        return len(self.file_names )
    def __getitem__( self , idx ):
        """simple docstring"""
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert("RGB" )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config , args ):
    '''simple docstring'''
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    image_size = config["image_size"]
    if not isinstance(image_size , (list, tuple) ):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps , "isdigit" ):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps )
        else:
            raise ValueError(
                f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir , fname ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
    # Build the label correspondences
    all_labels = [extract_label(fname ) for fname in file_names]
    id_to_label = list(set(all_labels ) )
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label )}
    # Set the seed before splitting the data.
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names ) )
    cut = int(0.8 * len(file_names ) )
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size , scale=(0.5, 1.0) ), ToTensor()] )
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split] , image_transform=train_tfm , label_to_id=label_to_id )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size ), ToTensor()] )
    eval_dataset = PetsDataset([file_names[i] for i in eval_split] , image_transform=eval_tfm , label_to_id=label_to_id )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    eval_dataloader = DataLoader(eval_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d" , pretrained=True , num_classes=len(label_to_id ) )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
    std = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer , max_lr=lr , epochs=num_epochs , steps_per_epoch=len(train_dataloader ) )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
            accelerator.load_state(args.resume_from_checkpoint )
            path = os.path.basename(args.resume_from_checkpoint )
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
            dirs.sort(key=os.path.getctime )
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path )[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_" , "" ) ) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_" , "" ) )
            starting_epoch = resume_step // len(train_dataloader )
            resume_step -= starting_epoch * len(train_dataloader )
    # Now we train the model
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader , resume_step )
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs )
            loss = torch.nn.functional.cross_entropy(outputs , batch["label"] )
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss )
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps , int ):
                output_dir = f"""step_{overall_step}"""
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir , output_dir )
                    accelerator.save_state(output_dir )
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device ) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs )
            predictions = outputs.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]) )
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""" )
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=overall_step , )
        if checkpointing_steps == "epoch":
            output_dir = f"""epoch_{epoch}"""
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir , output_dir )
            accelerator.save_state(output_dir )
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument("--data_dir" , required=True , help="The data folder on disk." )
    parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--checkpointing_steps" , type=str , default=None , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--resume_from_checkpoint" , type=str , default=None , help="If the training should continue from a checkpoint folder." , )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config , args )
if __name__ == "__main__":
main()
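# A hedged launch sketch (the script filename and data directory are
# hypothetical; the loop expects a folder of Oxford-IIIT-Pet-style
# `<label>_<n>.jpg` images):
#
#     accelerate launch cv_example.py --data_dir ./images --with_tracking --checkpointing_steps epoch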
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def cross_attention_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def lowercase__ ( self ):
        """simple docstring"""
        return 100
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**config )
        return model
    @property
    def dummy_movq_kwargs( self ):
        """simple docstring"""
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            "num_train_timesteps": 1_000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph , s , t , parent ):
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph ,source ,sink ,parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow ,graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
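# The two functions above implement max-flow/min-cut via the Edmonds-Karp
# flavor of Ford-Fulkerson: BFS finds augmenting paths until none remain, and
# the edges whose residual capacity was driven to 0 (but had positive capacity
# in the original matrix) form the minimum cut. Hedged usage sketch:
#
#     cut_edges = mincut(test_graph, source=0, sink=5)
#     # -> list of (u, v) pairs crossing the minimum cut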
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
    def setUp( self ):
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
    def test_keras_fit( self ):
        super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_model_outputs_equivalence( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Image.Image:
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
    ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
    ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
    ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
    ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    hub_interface = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
    hub_interface.model.load_state_dict(sd['''model'''] )
    return hub_interface
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load('''pytorch/fairseq''' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('''.''' , '''-''' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='''pt''' ).unsqueeze(0 )
    if not torch.eq(tokens , tokens2 ).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}" )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['''model.shared.weight'''] = state_dict['''model.decoder.embed_tokens.weight''']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('''mnli''' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , '''lm_head''' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
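# Example invocation (the script filename and output path are hypothetical;
# valid first arguments are the fairseq ids listed in FAIRSEQ_MODELS above):
#
#     python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn-converted \
#         --hf_config facebook/bart-large-cnn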
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig ):
    model_type = 'gpt_neox'
    def __init__( self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1E-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
"""simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('''type''', None )
        rope_scaling_factor = self.rope_scaling.get('''factor''', None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Optional[int] = VideoMAEForPreTraining(A )
model.to(A )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ : Tuple = torch.ones((self.num_masks,) )
lowerCamelCase_ : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase_ : List[str] = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase_ : Any = model(A , A )
# model only returns predictions for masked patches
lowerCamelCase_ : Optional[Any] = mask.sum().item()
lowerCamelCase_ : Union[str, Any] = 3 * self.tubelet_size * self.patch_size**2
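        # Illustrative note (added): with the defaults above, 22 patches are masked and
        # each masked patch is reconstructed as 3 * 2 * 2**2 = 24 pixel values, so the
        # expected logits shape is (batch_size, 22, 24).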
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCAmelCase__ (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : int = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase : Dict = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Tuple = False
lowerCamelCase : Tuple = False
lowerCamelCase : Optional[Any] = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = VideoMAEModelTester(self )
lowerCamelCase_ : Dict = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def UpperCAmelCase__ (self , A , A , A=False ):
lowerCamelCase_ : Tuple = copy.deepcopy(A )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ : Tuple = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase_ : Union[str, Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase_ : str = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase_ : int = bool_masked_pos.to(A )
if return_labels:
if model_class in [
*get_values(A ),
]:
lowerCamelCase_ : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] = model_class(A )
lowerCamelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase_ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
@slow
def UpperCAmelCase__ (self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : List[Any] = VideoMAEModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase__ (self ):
if not self.has_attentions:
pass
else:
lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : int = True
for model_class in self.all_model_classes:
lowerCamelCase_ : Dict = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ : Any = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : List[str] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : Tuple = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase_ : Union[str, Any] = len(A )
# Check attention is always last and order is fine
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : str = True
lowerCamelCase_ : Dict = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : int = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 1 , len(A ) )
lowerCamelCase_ : str = outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase__ (self ):
def check_hidden_states_output(A , A , A ):
lowerCamelCase_ : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Optional[int] = model(**self._prepare_for_class(A , A ) )
lowerCamelCase_ : List[Any] = outputs.hidden_states
lowerCamelCase_ : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A ) , A )
lowerCamelCase_ : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_, lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[int] = True
check_hidden_states_output(A , A , A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ (self ):
pass
def prepare_video() -> list:
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ (self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
A )
lowerCamelCase_ : List[str] = self.default_image_processor
lowerCamelCase_ : Any = prepare_video()
lowerCamelCase_ : Optional[int] = image_processor(A , return_tensors='''pt''' ).to(A )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Dict = model(**A )
# verify the logits
lowerCamelCase_ : Tuple = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase_ : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(A )
lowerCamelCase_ : Tuple = self.default_image_processor
lowerCamelCase_ : List[Any] = prepare_video()
lowerCamelCase_ : Union[str, Any] = image_processor(A , return_tensors='''pt''' ).to(A )
# add boolean mask, indicating which patches to mask
lowerCamelCase_ : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
lowerCamelCase_ : int = torch.load(A )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Any = model(**A )
# verify the logits
lowerCamelCase_ : Optional[int] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
lowerCamelCase_ : Dict = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=A )
self.assertEqual(outputs.logits.shape , A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase_ : Any = torch.tensor([0.51_42] , device=A )
self.assertTrue(torch.allclose(outputs.loss , A , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=A ).to(
A )
with torch.no_grad():
lowerCamelCase_ : List[Any] = model(**A )
        lowerCamelCase_ : int = torch.tensor([0.64_69] , device=A )
self.assertTrue(torch.allclose(outputs.loss , A , atol=1E-4 ) )
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        '''Authorization''': F"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
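# Illustrative usage (added; the token shown is hypothetical): fetch_github_info('ghp_example')
# issues GET https://api.github.com/user with that token and returns the decoded JSON,
# e.g. a dict with keys such as 'login' and 'id'.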
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 359
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
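        # Added note: this mirrors the GPT-2-style scaled initialization for residual
        # projections; e.g. with initializer_range=0.02 and 5 hidden layers,
        # model_std = 0.02 / sqrt(2 * 5) ~= 0.0063.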
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
        # Define PAD Token = EOS Token (BioGPT has no dedicated pad token, so it reuses EOS)
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296
| 0
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
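# Example (added for illustration): harmonic_series("4") -> ['1', '1/2', '1/3', '1/4']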
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 105
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
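# Illustrative trace (added comment): applying the patterns above in order maps the TF
# key 'encoder/ffn.dense.kernel' -> 'encoder.ffn.dense.kernel' -> 'encoder.fc1.kernel'
# -> 'encoder.fc1.weight'.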
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(a__ , a__ )
return k
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
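# Example invocation (a sketch; the script filename is hypothetical, positional args as
# defined above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc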
| 313
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Any]) ->Tuple:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
A__ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Any:
'''simple docstring'''
A__ = '''sgugger/tiny-distilbert-classification'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , only_pretrain_model=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , torchscript=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : str) ->Union[str, Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
# set architectures equal to `None`
A__ = None
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : str) ->List[str]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = '''sshleifer/tinier_bart'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = '''sshleifer/tinier_bart'''
A__ = AutoConfig.from_pretrained(UpperCAmelCase__)
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__ , configs=[config])
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , save_to_csv=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase__ , '''inf_time.csv''') , train_memory_csv_file=os.path.join(UpperCAmelCase__ , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(UpperCAmelCase__ , '''inf_mem.csv''') , train_time_csv_file=os.path.join(UpperCAmelCase__ , '''train_time.csv''') , env_info_csv_file=os.path.join(UpperCAmelCase__ , '''env.csv''') , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''env.csv''')).exists())
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Tuple:
'''simple docstring'''
A__ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(UpperCAmelCase__ : Tuple):
self.assertTrue(hasattr(UpperCAmelCase__ , '''sequential'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''cumulative'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''current'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase__ , '''log.txt''') , log_print=UpperCAmelCase__ , trace_memory_line_by_line=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
A__ = PyTorchBenchmark(UpperCAmelCase__)
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , '''log.txt''')).exists())
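# A minimal sketch (added) of the pattern these tests exercise, using only the benchmark
# API already imported above:
#   args = PyTorchBenchmarkArguments(models=['sshleifer/tiny-gpt2'], training=False,
#                                    inference=True, sequence_lengths=[8],
#                                    batch_sizes=[1], multi_process=False)
#   results = PyTorchBenchmark(args).run()
#   # results.time_inference_result / results.memory_inference_result map each model to
#   # {'bs': ..., 'ss': ..., 'result': {batch_size: {sequence_length: value}}}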
| 231
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    '''simple docstring'''
    def __init__(self) -> None:
        '''simple docstring'''
        self.connections: dict[str, dict[str, float]] = {}
    def add_node(self, node: str) -> None:
        '''simple docstring'''
        self.connections[node] = {}
    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability
    def get_nodes(self) -> list[str]:
        '''simple docstring'''
        return list(self.connections)
    def transition(self, node: str) -> str:
        '''simple docstring'''
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """simple docstring"""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
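# Illustrative run (added; exact counts vary because `random()` drives the walk):
#   transitions = [('a', 'a', 0.9), ('a', 'b', 0.1), ('b', 'a', 0.5), ('b', 'b', 0.5)]
#   get_transitions('a', transitions, 1000)  # -> visit counts heavily favouring 'a'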
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''vit_msn'''
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
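# Minimal usage sketch (added): ViTMSNConfig() yields the base configuration
# (hidden_size=768, 12 layers, 12 heads); pass overrides as keyword arguments,
# e.g. ViTMSNConfig(image_size=384).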
| 188
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
lowerCamelCase_ = logging.getLogger(__name__)
def __lowerCamelCase ( ) -> int:
__SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=a_ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=a_ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=a_ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=a_ , default='''data/dump''' , help='''The dump file prefix.''' )
__SCREAMING_SNAKE_CASE :Any = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
__SCREAMING_SNAKE_CASE :Union[str, Any] = BertTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
__SCREAMING_SNAKE_CASE :Union[str, Any] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
__SCREAMING_SNAKE_CASE :str = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
__SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
__SCREAMING_SNAKE_CASE :Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__SCREAMING_SNAKE_CASE :Optional[int] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
__SCREAMING_SNAKE_CASE :str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
__SCREAMING_SNAKE_CASE :Union[str, Any] = fp.readlines()
logger.info('''Start encoding''' )
logger.info(f'''{len(a_ )} examples to process.''' )
__SCREAMING_SNAKE_CASE :Optional[int] = []
__SCREAMING_SNAKE_CASE :List[str] = 0
__SCREAMING_SNAKE_CASE :Optional[Any] = 1_00_00
__SCREAMING_SNAKE_CASE :List[Any] = time.time()
for text in data:
__SCREAMING_SNAKE_CASE :Any = f'''{bos} {text.strip()} {sep}'''
__SCREAMING_SNAKE_CASE :int = tokenizer.encode(a_ , add_special_tokens=a_ )
rslt.append(a_ )
iter += 1
if iter % interval == 0:
__SCREAMING_SNAKE_CASE :Any = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
__SCREAMING_SNAKE_CASE :Any = time.time()
logger.info('''Finished binarization''' )
logger.info(f'''{len(a_ )} examples processed.''' )
__SCREAMING_SNAKE_CASE :Optional[int] = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
__SCREAMING_SNAKE_CASE :str = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(a_ , '''wb''' ) as handle:
pickle.dump(rslt_ , a_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
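# Example invocation (a sketch; the script filename is hypothetical, flags as defined above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/dump
# writes data/dump.bert-base-uncased.pickle holding uint16 token-id arrays
# (uint16 because BERT's vocab size is below 2**16).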
| 191
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : Tuple = parent
__snake_case : str = batch_size
__snake_case : Tuple = seq_length
__snake_case : List[str] = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : List[Any] = use_labels
__snake_case : Tuple = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : int = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : str = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : int = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Union[str, Any] = num_labels
__snake_case : Optional[int] = num_choices
__snake_case : str = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[Any] = None
if self.use_token_type_ids:
__snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : List[str] = None
__snake_case : str = None
__snake_case : str = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = LlamaModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : str = model(a_ , attention_mask=a_ )
__snake_case : Tuple = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : int = True
__snake_case : Any = LlamaModel(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case : Tuple = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , )
__snake_case : int = model(a_ , attention_mask=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = LlamaForCausalLM(config=a_ )
model.to(a_ )
model.eval()
__snake_case : str = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = True
__snake_case : int = True
__snake_case : Optional[Any] = LlamaForCausalLM(config=a_ )
model.to(a_ )
model.eval()
# first forward pass
__snake_case : List[Any] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , use_cache=a_ , )
__snake_case : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : int = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : int = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , output_hidden_states=a_ , )['''hidden_states'''][0]
__snake_case : List[Any] = model(
a_ , attention_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , past_key_values=a_ , output_hidden_states=a_ , )['''hidden_states'''][0]
# select random slice
__snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) )
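        # Added summary comment: the check above asserts that decoding with cached
        # past_key_values plus 3 new tokens reproduces (within 1e-3) the hidden states
        # a full forward pass produces for those same 3 positions.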
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def test_save_load_fast_init_from_base(self):
        '''simple docstring'''
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test")
    @slow
    def test_model_70b_logits(self):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        '''simple docstring'''
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
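
    # Greedy decoding sketch (illustrative; the checkpoints above are gated):
    # with do_sample=False, generate() picks the argmax token at every step, so
    # top_p and temperature have no effect and the decoded string is
    # deterministic and comparable verbatim:
    #
    #   generated_ids = model.generate(input_ids, max_new_tokens=64, do_sample=False)
    #   text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)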
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    """simple docstring"""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
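

# A quick usage sketch for read_txt_into_dict (hypothetical helper and path,
# not part of the original script): one class name per line becomes an
# id-to-label mapping keyed by line number.
def _demo_read_txt_into_dict(path="/tmp/id2label.txt"):
    with open(path, "w") as f:
        f.write("angry\nhappy\n")
    assert read_txt_into_dict(path) == {0: "angry", 1: "happy"}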
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """simple docstring"""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """simple docstring"""
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
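    # To sanity-check the export, the dump folder can be reloaded with the
    # regular transformers API (a sketch; assumes a CTC fine-tuned model):
    #
    #   from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
    #   processor = Wav2Vec2Processor.from_pretrained(args.pytorch_dump_folder_path)
    #   model = Wav2Vec2ForCTC.from_pretrained(args.pytorch_dump_folder_path)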
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_A = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
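

# Migration sketch (not part of the original file): downstream code can switch
# to the replacement class with a one-line change, since both accept the same
# checkpoints, e.g.
#   processor = LayoutLMv2ImageProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")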
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("""RGB""", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
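    # quick sanity checks on get_distance (a sketch): the origin never escapes,
    # so its normalized step count is 1.0, while a point far outside the set
    # escapes on the very first iteration and maps to 0.0
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(5, 5, 50) == 0.0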
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1E-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
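

# A worked example of what preprocess produces (a sketch; the input size is
# arbitrary): a 522x389 RGB image is snapped down to 512x384, scaled to [0, 1],
# reordered to NCHW, and mapped to [-1, 1], i.e. a tensor of shape
# (1, 3, 384, 512) with values in [-1.0, 1.0].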
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
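

# With the defaults above, a 224x224 input split into 16x16 patches yields
# (224 // 16) ** 2 = 196 patch tokens per image (plus one [CLS] token), each
# embedded in a 768-dimensional space, e.g. (a sketch):
#   config = ViTConfig()
#   num_patches = (config.image_size // config.patch_size) ** 2  # -> 196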
"""simple docstring"""
from PIL import Image
def mean_threshold(image):
    '''simple docstring'''
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
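    # a tiny self-check (hypothetical, not in the original script): for the
    # pixel values [10, 20, 30, 40] the integer mean is 25, so thresholding
    # maps them to [0, 0, 255, 255]
    tiny = Image.new("L", (2, 2))
    tiny.putdata([10, 20, 30, 40])
    assert list(mean_threshold(tiny).getdata()) == [0, 0, 255, 255]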
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class SCREAMING_SNAKE_CASE__ ( pl.LightningModule ):
def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str="base" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowerCAmelCase_)
lowercase_ = 0
lowercase_ = Path(self.hparams.output_dir)
lowercase_ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
lowercase_ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
lowercase_ = config
lowercase_ = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , lowerCAmelCase_ , lowerCAmelCase_):
assert hasattr(self.config , lowerCAmelCase_), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , lowerCAmelCase_ , getattr(self.hparams , lowerCAmelCase_))
if tokenizer is None:
lowercase_ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowerCAmelCase_ , )
else:
lowercase_ = tokenizer
lowercase_ = MODEL_MODES[mode]
if model is None:
lowercase_ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path) , config=self.config , cache_dir=lowerCAmelCase_ , )
else:
lowercase_ = model
def _UpperCAmelCase ( self : List[Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = self.model_type.from_pretrained(*lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = arg_to_scheduler[self.hparams.lr_scheduler]
lowercase_ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
lowercase_ = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.model
lowercase_ = ["""bias""", """LayerNorm.weight"""]
lowercase_ = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
lowercase_ = Adafactor(
lowerCAmelCase_ , lr=self.hparams.learning_rate , scale_parameter=lowerCAmelCase_ , relative_step=lowerCAmelCase_)
else:
lowercase_ = AdamW(
lowerCAmelCase_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
lowercase_ = optimizer
lowercase_ = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict):
"""simple docstring"""
return self.validation_step(lowerCAmelCase_ , lowerCAmelCase_)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : List[str]):
"""simple docstring"""
return self.validation_end(lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
lowercase_ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
if stage == "test":
lowercase_ = len(self.test_dataloader().dataset)
else:
lowercase_ = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=lowerCAmelCase_)
lowercase_ = len(self.train_dataloader().dataset)
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any = False):
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""")
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return self.train_loader
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Dict):
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
lowerCAmelCase_ , list(filter(lowerCAmelCase_ , self.hparams.model_name_or_path.split("""/"""))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = self.output_dir.joinpath("""best_tfmr""")
lowercase_ = self.step_count
self.model.save_pretrained(lowerCAmelCase_)
self.tokenizer.save_pretrained(lowerCAmelCase_)
@staticmethod
def _UpperCAmelCase ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=lowerCAmelCase_ , help="""Pretrained config name or path if not the same as model_name""")
parser.add_argument(
"""--tokenizer_name""" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(lowerCAmelCase_).parent / """test_run""" / """cache""") , type=lowerCAmelCase_ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=lowerCAmelCase_ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=lowerCAmelCase_ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=lowerCAmelCase_ , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=lowerCAmelCase_ , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=lowerCAmelCase_ , help="""The initial learning rate for Adam.""")
        parser.add_argument(
            """--lr_scheduler""" , default="""linear""" , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCAmelCase_ , help="""Weight decay if we apply some.""")
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=lowerCAmelCase_ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCAmelCase_ , help="""Linear warmup over warmup_steps.""")
parser.add_argument("""--num_workers""" , default=4 , type=lowerCAmelCase_ , help="""kwarg passed to DataLoader""")
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=lowerCAmelCase_)
parser.add_argument("""--train_batch_size""" , default=3_2 , type=lowerCAmelCase_)
parser.add_argument("""--eval_batch_size""" , default=3_2 , type=lowerCAmelCase_)
parser.add_argument("""--adafactor""" , action="""store_true""")
class SCREAMING_SNAKE_CASE__ ( pl.Callback ):
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any):
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class SCREAMING_SNAKE_CASE__ ( pl.Callback ):
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowerCAmelCase_)
class SCREAMING_SNAKE_CASE__ ( pl.Callback ):
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]):
"""simple docstring"""
lowercase_ = trainer.lr_schedulers[0]["""scheduler"""]
lowercase_ = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lowerCAmelCase_)
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int):
"""simple docstring"""
rank_zero_info("""***** Validation results *****""")
lowercase_ = trainer.callback_metrics
# Log results
for key in sorted(lowerCAmelCase_):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(lowerCAmelCase_ , str(metrics[key])))
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any):
"""simple docstring"""
rank_zero_info("""***** Test results *****""")
lowercase_ = trainer.callback_metrics
# Log and save results to file
lowercase_ = os.path.join(pl_module.hparams.output_dir , """test_results.txt""")
with open(lowerCAmelCase_ , """w""") as writer:
for key in sorted(lowerCAmelCase_):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(lowerCAmelCase_ , str(metrics[key])))
writer.write("""{} = {}\n""".format(lowerCAmelCase_ , str(metrics[key])))
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> None:
'''simple docstring'''
parser.add_argument(
"""--output_dir""" , default=str(Path(lowercase_ ).parent / """test_run""" / """model_checkpoints""" ) , type=lowercase_ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=lowercase_ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=lowercase_ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=lowercase_ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=lowercase_ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=lowercase_ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(lowercase_ ).parent / """test_run""" / """dummy-train-data""" ) , type=lowercase_ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[] , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> int:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
lowercase_ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowercase_ )
# add custom checkpoints
if checkpoint_callback is None:
lowercase_ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowercase_ )
if logging_callback is None:
lowercase_ = LoggingCallback()
lowercase_ = {}
    if args.fp16:
lowercase_ = 16
if args.gpus > 1:
lowercase_ = """auto"""
lowercase_ = """ddp"""
lowercase_ = args.accumulate_grad_batches
lowercase_ = None
lowercase_ = """auto"""
lowercase_ = pl.Trainer.from_argparse_args(
lowercase_ , weights_summary=lowercase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowercase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowercase_ , )
if args.do_train:
trainer.fit(lowercase_ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
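

# A minimal usage sketch (hypothetical subclass and helper names; the hooks
# follow the definitions above):
#
#   class MyTask(<the LightningModule base class defined above>):
#       def training_step(self, batch, batch_idx): ...
#       def get_dataloader(self, type_path, batch_size, shuffle=False): ...
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())           # assumed name of the arg helper
#   parser = MyTask.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTask(args), args)     # assumed name of the train helper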
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        """simple docstring"""
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the circle: the last node links back to the front
        current_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        """simple docstring"""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """simple docstring"""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        """simple docstring"""
        if self.is_empty():
            raise Exception("""Empty Queue""")

    def check_is_full(self) -> None:
        """simple docstring"""
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""")


class Node:
    def __init__(self) -> None:
        """simple docstring"""
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
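    # Kadane's scan keeps the best running sum seen so far; for the array above
    # the maximum subarray is [4, -1, 2, 1] with sum 6
    assert max_subarray_sum(nums) == 6
    assert max_subarray_sum([], allow_empty_subarrays=True) == 0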
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
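    # usage sketch: prefix sums answer range-sum queries in O(1) after an O(n)
    # setup pass
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10
    assert ps.get_sum(1, 2) == 5
    assert ps.contains_sum(7)  # the subarray [3, 4]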
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """simple docstring"""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."})
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."})
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase__() ->List[Any]:
"""simple docstring"""
lowercase__ : Optional[int]= (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase__ : str= bs[:]
lowercase__ : List[str]= 0
for b in range(2**8 ):
if b not in bs:
bs.append(A )
cs.append(2**8 + n )
n += 1
lowercase__ : Union[str, Any]= [chr(A ) for n in cs]
return dict(zip(A , A ) )
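# A small sanity sketch (illustrative only) of the invariant the table above is
# built to satisfy: all 256 byte values map to distinct unicode characters, and
# printable bytes map to themselves.
def _check_byte_table(table: dict) -> None:
    assert len(table) == 256 and len(set(table.values())) == 256
    assert table[ord("!")] == "!"  # printable bytes are left unchanged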
def lowercase__(A ) ->str:
"""simple docstring"""
lowercase__ : Optional[int]= set()
lowercase__ : Optional[int]= word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : Tuple= char
return pairs
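# e.g. for the symbol tuple ("h", "e", "l", "l", "o") the function above returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate BPE merge pairs.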
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["input_ids", "attention_mask"]
def __init__( self , snake_case__ , snake_case__ , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
lowercase__ : Dict= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
lowercase__ : Dict= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowercase__ : str= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
lowercase__ : Optional[Any]= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
lowercase__ : List[str]= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
lowercase__ : Any= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase__ : int= AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
lowercase__ : int= json.load(snake_case__ )
lowercase__ : List[str]= {v: k for k, v in self.encoder.items()}
lowercase__ : Dict= errors # how to handle errors in decoding
lowercase__ : Optional[int]= bytes_to_unicode()
lowercase__ : str= {v: k for k, v in self.byte_encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
lowercase__ : Tuple= merges_handle.read().split("\n" )[1:-1]
lowercase__ : List[str]= [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : int= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase__ : Optional[int]= {}
lowercase__ : Tuple= add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : Optional[int]= re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase__ : Any= tuple(snake_case__ )
lowercase__ : Union[str, Any]= get_pairs(snake_case__ )
if not pairs:
return token
while True:
lowercase__ : Union[str, Any]= min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ : Dict= bigram
lowercase__ : Optional[Any]= []
lowercase__ : Optional[int]= 0
while i < len(snake_case__ ):
try:
lowercase__ : Optional[int]= word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : List[Any]= j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : Optional[int]= tuple(snake_case__ )
lowercase__ : List[Any]= new_word
if len(snake_case__ ) == 1:
break
else:
lowercase__ : Tuple= get_pairs(snake_case__ )
lowercase__ : Tuple= " ".join(snake_case__ )
lowercase__ : List[str]= word
return word
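        # Worked example (illustrative ranks): for the word ("h", "u", "g") with
        # bpe_ranks {("h", "u"): 0, ("hu", "g"): 1}, the loop above first merges
        # ("h", "u") into ("hu", "g"), then ("hu", "g") into ("hug",), and stops
        # once no remaining adjacent pair appears in bpe_ranks.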
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Optional[int]= []
for token in re.findall(self.pat , snake_case__ ):
lowercase__ : int= "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(" " ) )
return bpe_tokens
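        # e.g. the token " hello" is first byte-mapped to "Ġhello" (the byte-level
        # marker for a leading space), so the space survives the BPE split.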
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
return self.decoder.get(snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
lowercase__ : Union[str, Any]= "".join(snake_case__ )
lowercase__ : Optional[int]= bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : Dict= os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple= os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
lowercase__ : Any= 0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ : Optional[Any]= token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : Any= [self.cls_token_id]
lowercase__ : Dict= [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
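        # i.e. a single sequence is formatted as `<s> A </s>` and a pair as
        # `<s> A </s></s> B </s>` (the BART-style convention LED follows).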
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : Tuple= [self.sep_token_id]
lowercase__ : Optional[int]= [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False , **snake_case__ ):
'''simple docstring'''
lowercase__ : str= kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()):
lowercase__ : List[Any]= " " + text
return (text, kwargs)
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = PaddingStrategy.DO_NOT_PAD , snake_case__ = None , snake_case__ = None , ):
'''simple docstring'''
lowercase__ : List[str]= super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
lowercase__ : Tuple= "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase__ : Any= encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
lowercase__ : int= len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ )
if needs_to_be_padded:
lowercase__ : Dict= len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase__ : Tuple= (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowercase__ : Any= [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
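        # Illustrative example: right-padding global_attention_mask [0, 1, 0] to
        # length 6 yields [0, 1, 0, -1, -1, -1]; -1 marks padding, while 0 already
        # means "local attention" and so cannot double as the pad value.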
| 150
| 1
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2", "stage3"] , UpperCAmelCase=[1, 2, 3] , ):
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
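        # e.g. with the tester defaults image_size=32, patch_size=2, three stages and
        # embed_dim=16: expected_seq_len == ((32 // 2) ** 2) // 4 ** 2 == 16 and
        # expected_dim == 16 * 2 ** 2 == 64.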
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self ):
"""simple docstring"""
return
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
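        # e.g. image_size (32, 32) with patch_size (2, 2) gives a padded size of
        # (34, 34): the formula always adds at least one extra patch row/column.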
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
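            # (NaN != NaN, so the mask `t != t` selects exactly the NaN entries.)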
def check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase={} ):
with torch.no_grad():
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
_UpperCAmelCase = model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
def recursive_check(UpperCAmelCase , UpperCAmelCase ):
if isinstance(UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCAmelCase , UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}. Dict has"""
F""" `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}."""
) , )
recursive_check(UpperCAmelCase , UpperCAmelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'output_hidden_states': True} )
@require_torch
class __lowerCamelCase ( unittest.TestCase , snake_case__):
"""simple docstring"""
UpperCamelCase__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase__ = MaskFormerSwinConfig
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(UpperCAmelCase )
backbone.to(UpperCAmelCase )
backbone.eval()
_UpperCAmelCase = backbone(**UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
            self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
        self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
                self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 39
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
        'training_script' , type=str , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
return parser.parse_args()
def main():
    args = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
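# Example invocation (illustrative script path and flags):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5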
if __name__ == "__main__":
main()
| 195
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a ( _lowerCamelCase ):
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "LayoutLMv2ImageProcessor"
snake_case_ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Union[str, Any] , lowercase_ : Any=None , lowercase_ : int=None , **lowercase_ : int ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase_ , )
snake_case_ = kwargs.pop('''feature_extractor''' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : List[str] , lowercase_ : Tuple , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : List[Any] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
snake_case_ = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case_ = features['''words''']
snake_case_ = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
snake_case_ = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
snake_case_ = self.get_overflowing_images(lowercase_ , encoded_inputs['''overflow_to_sample_mapping'''] )
snake_case_ = images
return encoded_inputs
def A_ ( self : Any , lowercase_ : str , lowercase_ : List[Any] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F" {len(lowercase_ )} and {len(lowercase_ )}" )
return images_with_overflow
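        # e.g. an overflow_to_sample_mapping of [0, 0, 1] expands two images into
        # [images[0], images[0], images[1]], one entry per overflowing chunk.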
def A_ ( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : int ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def A_ ( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Dict ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def A_ ( self : List[Any] ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A_ ( self : Dict ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
return self.image_processor_class
@property
def A_ ( self : Dict ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , )
return self.image_processor
| 72
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''simple docstring'''
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 72
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : str = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = '''xlnet'''
UpperCamelCase__ = ['''mems''']
UpperCamelCase__ = {
'''n_token''': '''vocab_size''', # Backward compatibility
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self :Dict , __magic_name__ :Optional[int]=3_2000 , __magic_name__ :Union[str, Any]=1024 , __magic_name__ :str=24 , __magic_name__ :List[Any]=16 , __magic_name__ :Tuple=4096 , __magic_name__ :Dict="gelu" , __magic_name__ :Optional[int]=True , __magic_name__ :str="bi" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :List[str]=1E-1_2 , __magic_name__ :List[str]=0.1 , __magic_name__ :List[str]=512 , __magic_name__ :Any=None , __magic_name__ :Any=True , __magic_name__ :List[str]=False , __magic_name__ :Optional[int]=False , __magic_name__ :Union[str, Any]=-1 , __magic_name__ :Optional[Any]=False , __magic_name__ :Dict="last" , __magic_name__ :str=True , __magic_name__ :List[Any]="tanh" , __magic_name__ :Tuple=0.1 , __magic_name__ :Optional[Any]=5 , __magic_name__ :int=5 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Union[str, Any]=1 , __magic_name__ :str=2 , **__magic_name__ :List[Any] , ):
'''simple docstring'''
a = vocab_size
a = d_model
a = n_layer
a = n_head
if d_model % n_head != 0:
raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
a = d_model // n_head
a = ff_activation
a = d_inner
a = untie_r
a = attn_type
a = initializer_range
a = layer_norm_eps
a = dropout
a = mem_len
a = reuse_len
a = bi_data
a = clamp_len
a = same_length
a = summary_type
a = summary_use_proj
a = summary_activation
a = summary_last_dropout
a = start_n_top
a = end_n_top
a = bos_token_id
a = pad_token_id
a = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , __magic_name__ , )
a = kwargs["""use_cache"""]
a = use_mems_eval
a = use_mems_train
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@property
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
def lowerCamelCase__ ( self :Any , __magic_name__ :Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
| 228
|
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
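# Sanity check (from the Project Euler 65 statement): the 10th convergent of e is
# 1457/536, and 1 + 4 + 5 + 7 == 17, so solution(10) == 17.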
if __name__ == "__main__":
print(F'{solution() = }')
| 228
| 1
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Any = None
a__ : Dict = BloomTokenizerFast
a__ : str = BloomTokenizerFast
a__ : Dict = True
a__ : Optional[Any] = False
a__ : str = "tokenizer_file"
a__ : str = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def a ( self : Optional[int] ):
super().setUp()
__UpperCAmelCase = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : int , **_lowercase : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def a ( self : Any ):
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
__UpperCAmelCase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__UpperCAmelCase = tokenizer.batch_encode_plus(_lowercase )['''input_ids''']
self.assertListEqual(_lowercase , _lowercase )
__UpperCAmelCase = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def a ( self : Union[str, Any] , _lowercase : List[Any]=6 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__UpperCAmelCase = '''This is a simple input'''
__UpperCAmelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
__UpperCAmelCase = ('''This is a simple input''', '''This is a pair''')
__UpperCAmelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
tokenizer_r.encode(_lowercase , max_length=_lowercase )
tokenizer_r.batch_encode_plus(_lowercase , max_length=_lowercase )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , )
def a ( self : List[str] ):
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=_lowercase )
__UpperCAmelCase = next(iter(_lowercase ) )['''premise'''] # pick up one data
__UpperCAmelCase = list(sample_data.values() )
__UpperCAmelCase = list(map(tokenizer.encode , _lowercase ) )
__UpperCAmelCase = [tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) for x in output_tokens]
self.assertListEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. The parent class's test would fail since it relies on
        # the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 86
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : List[Any] = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( lowerCAmelCase_ , unittest.TestCase):
lowerCamelCase__ : Optional[Any] = BioGptTokenizer
lowerCamelCase__ : List[str] = False
def _UpperCAmelCase ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase__ : Any = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
lowercase__ : str = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def _UpperCAmelCase ( self , a ) -> Tuple:
lowercase__ : Optional[Any] = 'lower newer'
lowercase__ : str = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ) -> str:
lowercase__ : List[Any] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowercase__ : List[Any] = 'lower'
lowercase__ : List[str] = ['low', 'er</w>']
lowercase__ : str = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : List[Any] = tokens + ['<unk>']
lowercase__ : Optional[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Union[str, Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowercase__ : str = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase__ )
lowercase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase__ )
lowercase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
lowercase__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 77
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
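# Illustrative effect of the lazy pattern above: importing the package is cheap,
# and a heavy submodule such as modeling_llama is only loaded the first time an
# attribute like LlamaForCausalLM is actually accessed.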
| 296
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowercase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : List[str] = frozenset([] )
def a ( self ):
snake_case_ = 32
snake_case_ = embedder_hidden_size
# image encoding components
snake_case_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
snake_case_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=snake_case , projection_dim=snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
snake_case_ = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
snake_case_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
snake_case_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
torch.manual_seed(0 )
snake_case_ = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=snake_case , steps_offset=1 , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL()
snake_case_ = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def a ( self , snake_case , snake_case=0 , snake_case=True ):
if str(snake_case ).startswith('mps' ):
snake_case_ = torch.manual_seed(snake_case )
else:
snake_case_ = torch.Generator(device=snake_case ).manual_seed(snake_case )
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
if pil_image:
snake_case_ = input_image * 0.5 + 0.5
snake_case_ = input_image.clamp(0 , 1 )
snake_case_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case_ = DiffusionPipeline.numpy_to_pil(snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def a ( self ):
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableUnCLIPImgaImgPipeline(**snake_case )
snake_case_ = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
snake_case_ = self.get_dummy_inputs(snake_case )
inputs.update({'image_embeds': None} )
snake_case_ = sd_pipe(**snake_case ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def a ( self ):
snake_case_ = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
def a ( self ):
snake_case_ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=snake_case )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
snake_case_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
        snake_case_ = pipe(snake_case , 'anime turtle' , generator=snake_case , output_type='np' )
snake_case_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case , snake_case )
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
snake_case_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
        snake_case_ = pipe(snake_case , 'anime turtle' , generator=snake_case , output_type='np' )
snake_case_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case , snake_case )
def a ( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
snake_case_ = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case_ = pipe(
snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 200
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
snake_case_ = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
snake_case_ = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(snake_case )
from datasets import load_dataset
snake_case_ = load_dataset('nielsr/rvlcdip-demo' )
snake_case_ = dataset['train'][0]['image'].convert('RGB' )
snake_case_ = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case )
snake_case_ = outputs.logits
snake_case_ = torch.Size((1, 16) )
self.assertEqual(logits.shape , snake_case )
snake_case_ = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=snake_case , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case , atol=1e-4 ) )
| 200
| 1
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 115
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
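# Illustrative sketch (the tokenizer instance name is hypothetical): like
# CamemBERT, BARThez renders a sequence pair as `<s> A </s></s> B </s>`, so
#     tok.build_inputs_with_special_tokens([10, 11], [12])
# returns [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 12, tok.sep_token_id].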
| 115
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast( BertTokenizerFast ):
    '''simple docstring'''
    slow_tokenizer_class = CustomTokenizer
    pass
| 370
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
    default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
    default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names
question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=max_seq_length , stride=args.doc_stride , return_overflowing_tokens=True , return_offsets_mapping=True , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i )
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
    logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
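# Example invocation of the script above (the file name and all paths are
# placeholders; the flags are the ones defined by the argparse setup above):
#     python run_trt_qa.py \
#         --onnx_model_path bert-qa.onnx \
#         --output_dir ./trt_out \
#         --tokenizer_name bert-base-uncased \
#         --dataset_name squad \
#         --fp16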
| 306
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''')
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, '''func'''):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
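# Programmatic sketch mirroring main() above (the "default" subcommand name is
# an assumption based on default_command_parser, not verified here):
#     parser = get_config_parser()
#     args = parser.parse_args(["default"])
#     args.func(args)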
| 24
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self):
        """simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_ids )
        reconstructed_text = tokenizer.decode(input_ids )
        self.assertEqual(reconstructed_text , normalized_text )
| 24
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("""List is empty""" )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
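# Worked example for the function above: the mean of [0, 0, 10, 10] is 5 and
# every deviation from it is 5, so
#     average_absolute_deviation([0, 0, 10, 10])  # -> 5.0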
| 371
|
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    if not all(char in """01""" for char in bin_string ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not bin_string:
        raise ValueError("""Empty string was passed to the function""" )
    oct_string = """"""
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
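# Worked example: "1111" is left-padded to "001111" and split into "001" and
# "111" (octal digits 1 and 7), so bin_to_octal("1111") returns "17".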
| 212
| 0
|
def hexagonal_numbers(length: int ) -> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
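# Quick check of the closed form n * (2n - 1) used above:
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]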
| 51
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer( self ):
        """simple docstring"""
        return ByT5Tokenizer.from_pretrained('''google/byt5-small''')
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5):
        """simple docstring"""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False) , toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment( self ):
        """simple docstring"""
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
        batch_without_eos_added = tokenizer(['''hi''', '''I went to the gym''', ''''''])
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])
    def test_multibytes_char( self ):
        """simple docstring"""
        tokenizer = self.ta_base_tokenizer
        src_text = '''Unicode €.'''
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded , '''Unicode €.</s>''')
        encoded = tokenizer('''e è é ê ë''')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded , '''e è é ê ë</s>''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
if FRAMEWORK != "jax":
UpperCAmelCase_ = list(batch.input_ids.numpy()[0])
else:
UpperCAmelCase_ = list(batch.input_ids.tolist()[0])
self.assertListEqual(_snake_case , _snake_case)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _snake_case)
self.assertIn('''attention_mask''' , _snake_case)
self.assertNotIn('''decoder_input_ids''' , _snake_case)
self.assertNotIn('''decoder_attention_mask''' , _snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase_ = tokenizer(
text_target=_snake_case , max_length=32 , padding='''max_length''' , truncation=_snake_case , return_tensors=_snake_case)
self.assertEqual(32 , targets['''input_ids'''].shape[1])
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization. </s>''']
UpperCAmelCase_ = ['''Summary of the text. </s>''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , text_target=_snake_case)
self.assertEqual(_snake_case , batch['''input_ids'''][0])
self.assertEqual(_snake_case , batch['''labels'''][0])
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
shutil.rmtree(_snake_case)
UpperCAmelCase_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
UpperCAmelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case)
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(125)]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_snake_case)]
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer_class.from_pretrained(_snake_case)
self.assertTrue(tokenizer.decode([255]) == '''''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Dict):
"""simple docstring"""
pass
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
UpperCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
UpperCAmelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
UpperCAmelCase_ = 0
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(
_snake_case , skip_special_tokens=_snake_case)
for attr in attributes_list:
setattr(_snake_case , attr + '''_id''' , _snake_case)
self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
setattr(_snake_case , attr + '''_id''' , _snake_case)
self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
setattr(_snake_case , '''additional_special_tokens_ids''' , [])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [])
setattr(_snake_case , '''additional_special_tokens_ids''' , [token_id_to_test_setters])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [token_to_test_setters])
self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [token_id_to_test_setters])
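# Background note on the integration values above: ByT5 has no learned vocabulary;
# ids 0-2 are reserved (pad/eos/unk) and every UTF-8 byte b maps to id b + 3, so
# "U" (byte 85) becomes 88 and the trailing </s> is id 1.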
| 51
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer( self, **kwargs):
        """simple docstring"""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts( self, tokenizer):
        """simple docstring"""
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id( self):
        """simple docstring"""
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab( self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<s>')
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('Skip this test while all models are still to be uploaded.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = self.get_tokenizer()
_lowercase : Tuple = tokenizer.tokenize('This is a test')
self.assertListEqual(_lowerCAmelCase, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase), [2, 3, 4, 5, 6], )
_lowercase : List[str] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_lowerCAmelCase, ['▁This', '▁is', '▁a', '▁t', 'est'])
_lowercase : Dict = tokenizer.convert_tokens_to_string(_lowerCAmelCase)
self.assertEqual(_lowerCAmelCase, 'This is a test')
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = {'input_ids': [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e', )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest( unittest.TestCase ):
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    tgt_text = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass( cls):
        """simple docstring"""
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        cls.pad_token_id = 1
        return cls
def UpperCamelCase ( self) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('ar'), 12_80_06)
self.assertEqual(self.tokenizer.get_lang_id('en'), 12_80_22)
self.assertEqual(self.tokenizer.get_lang_id('ro'), 12_80_76)
self.assertEqual(self.tokenizer.get_lang_id('mr'), 12_80_63)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.tokenizer.get_vocab()
self.assertEqual(len(_lowerCAmelCase), self.tokenizer.vocab_size)
self.assertEqual(vocab['<unk>'], 3)
self.assertIn(self.tokenizer.get_lang_token('en'), _lowerCAmelCase)
    def test_tokenizer_batch_encode_plus( self):
        """simple docstring"""
        self.tokenizer.src_lang = 'en'
        input_ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, input_ids)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
self.assertIn(_lowerCAmelCase, self.tokenizer.all_special_ids)
# fmt: off
_lowercase : Tuple = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2]
# fmt: on
_lowercase : Dict = self.tokenizer.decode(_lowerCAmelCase, skip_special_tokens=_lowerCAmelCase)
_lowercase : Any = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=_lowerCAmelCase)
self.assertEqual(_lowerCAmelCase, _lowerCAmelCase)
self.assertNotIn(self.tokenizer.eos_token, _lowerCAmelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = tempfile.mkdtemp()
_lowercase : Tuple = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_lowerCAmelCase)
_lowercase : int = MaMaaaTokenizer.from_pretrained(_lowerCAmelCase)
self.assertDictEqual(new_tok.lang_token_to_id, _lowerCAmelCase)
@require_torch
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = 'en'
_lowercase : Optional[Any] = 'fr'
_lowercase : Optional[Any] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=_lowerCAmelCase, return_tensors='pt')
_lowercase : Optional[Any] = shift_tokens_right(
batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
for k in batch:
_lowercase : Union[str, Any] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
_lowercase : Optional[Any] = 'zh'
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = 'mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
_lowercase : Union[str, Any] = 'zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
self.assertEqual(
nested_simplify(_lowerCAmelCase), {
# en_XX, A, test, EOS
'input_ids': [[12_80_22, 58, 41_83, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 12_80_06,
}, )
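# Typical downstream usage implied by the tests above (the model object and its
# generate call are assumptions, not exercised in this file):
#     tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#     enc = tok("A test", return_tensors="pt")
#     out = model.generate(**enc, forced_bos_token_id=tok.get_lang_id("fr"))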
| 354
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__( self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__( self, batch_size = 1, generator = None, eta = 0.0, num_inference_steps = 50, use_clipped_model_output = None, output_type = "pil", return_dict = True, ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
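# Minimal usage sketch for the pipeline above (the checkpoint is a public example
# and an assumption of this note, not something this file depends on):
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]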
| 84
| 0
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = """owlvit_text_model"""
    def __init__( self , vocab_size=49_408 , hidden_size=512 , intermediate_size=2_048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49_406 , eos_token_id=49_407 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = """owlvit_vision_model"""
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get('''model_type''' ) == "owlvit":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class _a ( __a ):
__a : Dict = """owlvit"""
__a : Optional[Any] = True
def __init__( self : Tuple , lowercase : Tuple=None , lowercase : Optional[Any]=None , lowercase : List[str]=512 , lowercase : Any=2.6592 , lowercase : List[Any]=True , **lowercase : str , ):
'''simple docstring'''
super().__init__(**lowercase )
if text_config is None:
UpperCAmelCase = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
UpperCAmelCase = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
UpperCAmelCase = OwlViTTextConfig(**lowercase )
UpperCAmelCase = OwlViTVisionConfig(**lowercase )
UpperCAmelCase = projection_dim
UpperCAmelCase = logit_scale_init_value
UpperCAmelCase = return_dict
UpperCAmelCase = 1.0
@classmethod
def A ( cls : Tuple , lowercase : Union[str, os.PathLike] , **lowercase : List[str] ):
'''simple docstring'''
cls._set_token_in_kwargs(lowercase )
UpperCAmelCase , UpperCAmelCase = cls.get_config_dict(lowercase , **lowercase )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowercase , **lowercase )
@classmethod
def A ( cls : List[str] , lowercase : Dict , lowercase : Dict , **lowercase : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = text_config
UpperCAmelCase = vision_config
return cls.from_dict(lowercase , **lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.text_config.to_dict()
UpperCAmelCase = self.vision_config.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
class _a ( __a ):
@property
def A ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def A ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def A ( self : Any ):
'''simple docstring'''
return 1E-4
def A ( self : Tuple , lowercase : "ProcessorMixin" , lowercase : int = -1 , lowercase : int = -1 , lowercase : Optional["TensorType"] = None , ):
'''simple docstring'''
UpperCAmelCase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=lowercase , seq_length=lowercase , framework=lowercase )
UpperCAmelCase = super().generate_dummy_inputs(
processor.image_processor , batch_size=lowercase , framework=lowercase )
return {**text_input_dict, **image_input_dict}
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 14
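# Illustrative usage sketch (editor's addition, hedged): composing the composite config from
# the two sub-configs above. The asserted values are the defaults defined in this file.
if __name__ == "__main__":
    text_config = OwlViTTextConfig()
    vision_config = OwlViTVisionConfig()
    config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    assert config.text_config.hidden_size == 512
    assert config.vision_config.patch_size == 32
    assert config.projection_dim == 512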
| 34
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
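# Illustrative usage sketch (editor's addition, hedged): the two_stage guard above means
# two-stage mode is only valid together with box refinement, and `attribute_map` aliases
# hidden_size to d_model.
if __name__ == "__main__":
    config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    assert config.hidden_size == config.d_model == 256  # resolved through attribute_map
    try:
        DeformableDetrConfig(two_stage=True)  # with_box_refine defaults to False
    except ValueError as err:
        print(f"expected failure: {err}")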
| 313
| 0
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> List[Any]:
lowercase__ : str = 10
lowercase__ : int = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
lowercase__ : List[str] = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(__lowerCamelCase ) ),
} , features=__lowerCamelCase , )
return dataset
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=__lowerCamelCase )
return filename
# FILE_CONTENT + files
lowerCAmelCase_ = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
lowercase__ : str = FILE_CONTENT
with open(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase )
return filename
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
    import bz2
lowercase__ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
lowercase__ : Optional[int] = bytes(__lowerCamelCase , '''utf-8''' )
    with bz2.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
import gzip
lowercase__ : str = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
lowercase__ : Dict = bytes(__lowerCamelCase , '''utf-8''' )
with gzip.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowercase__ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
lowercase__ : Optional[int] = bytes(__lowerCamelCase , '''utf-8''' )
    with lz4.frame.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowercase__ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
    with py7zr.SevenZipFile(__lowerCamelCase , '''w''' ) as archive:
archive.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
import tarfile
lowercase__ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(__lowerCamelCase , '''w''' ) as f:
f.add(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
import lzma
lowercase__ : str = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
lowercase__ : Any = bytes(__lowerCamelCase , '''utf-8''' )
with lzma.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Any:
import zipfile
lowercase__ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowercase__ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
lowercase__ : List[Any] = bytes(__lowerCamelCase , '''utf-8''' )
with zstd.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
lowercase__ : Any = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase )
return filename
lowerCAmelCase_ = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase_ = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase_ = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase_ = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase_ = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> Optional[int]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Tuple = datasets.Dataset.from_dict(__lowerCamelCase )
lowercase__ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(__lowerCamelCase ) ) as con:
lowercase__ : Union[str, Any] = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(__lowerCamelCase , '''w''' , newline='''''' ) as f:
lowercase__ : int = csv.DictWriter(__lowerCamelCase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(__lowerCamelCase , '''w''' , newline='''''' ) as f:
lowercase__ : Optional[int] = csv.DictWriter(__lowerCamelCase , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
    import bz2
lowercase__ : Any = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(__lowerCamelCase , '''rb''' ) as f:
lowercase__ : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__lowerCamelCase , '''wb''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
lowercase__ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(__lowerCamelCase , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Any = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
lowercase__ : int = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(__lowerCamelCase , '''wb''' ) as f:
lowercase__ : List[Any] = pq.ParquetWriter(__lowerCamelCase , schema=__lowerCamelCase )
lowercase__ : Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__lowerCamelCase ) )] for k in DATA[0]} , schema=__lowerCamelCase )
writer.write_table(__lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Tuple:
lowercase__ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase__ : Any = {'''data''': DATA}
with open(__lowerCamelCase , '''w''' ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowercase__ : str = {'''data''': DATA_DICT_OF_LISTS}
with open(__lowerCamelCase , '''w''' ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> str:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__lowerCamelCase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(__lowerCamelCase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(__lowerCamelCase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(__lowerCamelCase ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int:
import gzip
lowercase__ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(__lowerCamelCase , '''rb''' ) as orig_file:
with gzip.open(__lowerCamelCase , '''wb''' ) as zipped_file:
zipped_file.writelines(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Any:
import gzip
lowercase__ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(__lowerCamelCase , '''rb''' ) as orig_file:
with gzip.open(__lowerCamelCase , '''wb''' ) as zipped_file:
zipped_file.writelines(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
lowercase__ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
lowercase__ : str = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.join('''nested''' , os.path.basename(__lowerCamelCase ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
lowercase__ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(__lowerCamelCase , '''w''' ) as f:
f.add(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
f.add(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(__lowerCamelCase , '''w''' ) as f:
f.add(__lowerCamelCase , arcname=os.path.join('''nested''' , os.path.basename(__lowerCamelCase ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : List[str] = ['''0''', '''1''', '''2''', '''3''']
lowercase__ : int = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[Any]:
lowercase__ : Union[str, Any] = ['''0''', '''1''', '''2''', '''3''']
lowercase__ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(__lowerCamelCase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
lowercase__ : Union[str, Any] = ['''0''', '''1''', '''2''', '''3''']
lowercase__ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(__lowerCamelCase , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
f.write(__lowerCamelCase , arcname=os.path.join('''main_dir''' , os.path.basename(__lowerCamelCase ) ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(__lowerCamelCase , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
lowercase__ : List[Any] = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
lowercase__ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(__lowerCamelCase )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> Union[str, Any]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( ) -> List[Any]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
lowercase__ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(__lowerCamelCase , '''w''' ) as f:
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ) )
f.write(__lowerCamelCase , arcname=os.path.basename(__lowerCamelCase ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def __UpperCAmelCase ( __lowerCamelCase ) -> Dict:
lowercase__ : Optional[Any] = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
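# Usage sketch (editor's addition; the fixture names below are hypothetical, matching the
# upstream datasets conftest rather than the mangled names in this file): session-scoped
# fixtures compose, so a test can request a generated CSV and its zipped variant together.
#     def test_zip_contains_csv(csv_path, zip_csv_path):
#         import os, zipfile
#         with zipfile.ZipFile(zip_csv_path) as zf:
#             assert os.path.basename(csv_path) in zf.namelist()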
| 302
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 302
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case__ : Optional[int] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
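# Editor's note (hedged): transition_dim is conventionally observation_dim + action_dim + 2
# (one slot each for reward and value); the defaults above satisfy 17 + 6 + 2 = 25.
if __name__ == "__main__":
    config = TrajectoryTransformerConfig()
    assert config.transition_dim == config.observation_dim + config.action_dim + 2
    assert config.hidden_size == config.n_embd == 128  # via attribute_map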
| 60
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
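# Illustrative check (editor's addition, hedged - assumes the standard
# OnnxConfig(config, task=...) constructor): the dynamic axes switch on the task.
if __name__ == "__main__":
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="multiple-choice")
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "choice", 2: "sequence"}
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="default")
    assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "sequence"}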
| 329
| 0
|
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ])
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ])
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
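# Editor's note (hedged): the loose 1e-2 tolerance above reflects the precision gap between the
# torch float16 reference slices and the bfloat16 Flax run - bfloat16 keeps float32's exponent
# range but only 8 significand bits (~2-3 decimal digits), e.g.:
#     x = jnp.asarray(0.2384, dtype=jnp.float32)
#     assert abs(float(x.astype(jnp.bfloat16)) - float(x)) < 1e-2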
| 359
|
'''simple docstring'''
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
    while True:
        print(lcg.next_number())
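# Editor's note (hedged): with the parameters above - multiplier 1_664_525, increment
# 1_013_904_223, modulo 2**32 - the Hull-Dobell conditions hold (the increment is coprime to
# the modulus, and multiplier - 1 is divisible by 4), so the generator attains the full period
# 2**32. A deterministic spot check:
#     lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=0)
#     assert lcg.next_number() == 1_013_904_223  # (1_664_525 * 0 + 1_013_904_223) % 2**32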
| 274
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    """configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
    """tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tapas"""] = [
        """TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TapasForMaskedLM""",
        """TapasForQuestionAnswering""",
        """TapasForSequenceClassification""",
        """TapasModel""",
        """TapasPreTrainedModel""",
        """load_tf_weights_in_tapas""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_tapas"""] = [
        """TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFTapasForMaskedLM""",
        """TFTapasForQuestionAnswering""",
        """TFTapasForSequenceClassification""",
        """TFTapasModel""",
        """TFTapasPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58
|
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
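# Worked example (editor's addition): the traversal above appends vertices post-order, so
# topological_sort("a", [], []) returns ["c", "d", "e", "b", "a"] for the graph at the top of
# this file - a reverse topological order; reversed it reads a, b, e, d, c, which respects
# every edge (a->c, a->b, b->d, b->e).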
| 169
| 0
|
def sum_of_series(first_term, common_diff, num_of_terms):
    # closed-form formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
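# Worked check (editor's addition): sum_of_series(1, 1, 10) evaluates the closed form
# (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0, matching 1 + 2 + ... + 10.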
| 357
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
| 162
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_owlvit""": [
        """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """OwlViTConfig""",
        """OwlViTOnnxConfig""",
        """OwlViTTextConfig""",
        """OwlViTVisionConfig""",
    ],
    """processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""OwlViTFeatureExtractor"""]
lowerCamelCase__ = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_owlvit"""] = [
        """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """OwlViTModel""",
        """OwlViTPreTrainedModel""",
        """OwlViTTextModel""",
        """OwlViTVisionModel""",
        """OwlViTForObjectDetection""",
    ]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 86
|
"""simple docstring"""
def molarity_to_normality(nfactor, moles, volume):
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
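# Worked check (editor's addition), using the ideal-gas constant 0.0821 L*atm/(mol*K)
# hard-coded above: 1 mol at 300 K in a 10 L vessel gives
# moles_to_pressure(volume=10, moles=1, temperature=300) == round(24.63 / 10) == 2 atm.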
| 86
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 267
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image()-> Optional[Any]:
    """Load the demo image used to verify the conversion."""
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config )-> Any:
    """Build the list of (original key, HF key) pairs for the vision encoder and QFormer."""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new )-> Tuple:
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config )-> Dict:
    """Fuse the separate q and v biases into the packed qkv bias the HF model expects."""
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict: k carries no bias in the original attention,
        # so a zero tensor is interleaved to give the fused qkv bias the right shape
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id )-> List[str]:
    """Assemble the HF config (and target image size) for the requested checkpoint."""
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False )-> List[Any]:
"""simple docstring"""
snake_case_ = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
snake_case_ = tokenizer('''\n''' , add_special_tokens=SCREAMING_SNAKE_CASE ).input_ids[0]
snake_case_ , snake_case_ = get_blipa_config(SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE )
snake_case_ = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval()
snake_case_ = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
snake_case_ , snake_case_ = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
snake_case_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case_ , snake_case_ , snake_case_ = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , is_eval=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
original_model.eval()
print('''Done!''' )
# update state dict keys
snake_case_ = original_model.state_dict()
snake_case_ = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE )
if key.startswith('''Qformer.bert''' ):
snake_case_ = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
snake_case_ = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
snake_case_ = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
snake_case_ = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
snake_case_ = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
snake_case_ = key.replace('''t5''' , '''language''' )
snake_case_ = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = hf_model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case_ = load_demo_image()
snake_case_ = vis_processors['''eval'''](SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE )
snake_case_ = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE )
# create processor
snake_case_ = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE )
snake_case_ = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
snake_case_ = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE )
# make sure processor creates exact same pixel values
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
original_model.to(SCREAMING_SNAKE_CASE )
hf_model.to(SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "opt" in model_name:
snake_case_ = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
snake_case_ = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).logits
else:
snake_case_ = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
snake_case_ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
snake_case_ = hf_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case_ = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=SCREAMING_SNAKE_CASE )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case_ = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=SCREAMING_SNAKE_CASE )
else:
# cast to same type
snake_case_ = logits.dtype
assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , atol=1E-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
snake_case_ = ''''''
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE )
snake_case_ = original_model.generate({'''image''': original_pixel_values} )
snake_case_ = hf_model.generate(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , SCREAMING_SNAKE_CASE )
snake_case_ = input_ids.shape[1]
snake_case_ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=SCREAMING_SNAKE_CASE )
snake_case_ = [text.strip() for text in output_text]
print('''HF generation:''' , SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 267
| 1
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
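# Sentence-level intuition with made-up numbers: for hypothesis ['the', 'cat', 'sat'] against
# reference ['the', 'cat', 'sat', 'down'], all 6 hypothesis n-grams of order 1-4 match, while the
# reference contains 10 such n-grams, so precision = 6/6, recall = 6/10 and GLEU = min(1.0, 0.6) = 0.6.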
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info( self ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 164
|
'''simple docstring'''
def odd_even_sort(input_list ):
    """Sort a list in place with odd-even transposition (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 164
| 1
|
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
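    # bloom-style checkpoints name the MLP projection differently from gpt2's c_fc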
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
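    # defined under the torch guard because the adapter below subclasses nn.Module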
    class LoRALayer(nn.Module):
        """Wraps a linear module with a rank-`rank` low-rank adapter (test helper only)."""

        def __init__( self , module , rank )-> List[Any]:
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features , rank , bias=False ), nn.Linear(rank , module.out_features , bias=False ), )
            small_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=small_std )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )

        def forward( self , input , *args , **kwargs )-> Any:
            return self.module(input , *args , **kwargs ) + self.adapter(input )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
    EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
    EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
    # several continuations are accepted because quantized kernels can differ slightly across GPUs
    MAX_NEW_TOKENS = 10
def snake_case ( self : Optional[int] )-> Optional[Any]:
# Models and tokenizer
lowerCamelCase__ : List[Any] =AutoTokenizer.from_pretrained(self.model_name )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def snake_case ( self : Optional[Any] )-> Optional[Any]:
super().setUp()
# Models and tokenizer
lowerCamelCase__ : Optional[Any] =AutoModelForCausalLM.from_pretrained(
self.model_name, torch_dtype=torch.floataa, device_map='''auto''' )
lowerCamelCase__ : List[Any] =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
def snake_case ( self : Optional[Any] )-> Any:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : List[Any] )-> List[Any]:
lowerCamelCase__ : List[str] =self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase, '''quantization_config''' ) )
lowerCamelCase__ : Union[str, Any] =config.to_dict()
lowerCamelCase__ : Union[str, Any] =config.to_diff_dict()
lowerCamelCase__ : Dict =config.to_json_string()
def snake_case ( self : Optional[Any] )-> List[Any]:
from bitsandbytes.nn import Paramsabit
lowerCamelCase__ : Optional[int] =self.model_fpaa.get_memory_footprint()
lowerCamelCase__ : int =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase__ : List[str] =get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def snake_case ( self : int )-> Optional[int]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase, torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def snake_case ( self : str )-> int:
lowerCamelCase__ : Tuple =self.tokenizer(self.input_text, return_tensors='''pt''' )
lowerCamelCase__ : List[Any] =self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS )
def snake_case ( self : Tuple )-> Union[str, Any]:
lowerCamelCase__ : str =BitsAndBytesConfig()
lowerCamelCase__ : str =True
lowerCamelCase__ : List[str] =AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=_UpperCAmelCase, device_map='''auto''' )
lowerCamelCase__ : Tuple =self.tokenizer(self.input_text, return_tensors='''pt''' )
lowerCamelCase__ : Any =model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS )
def snake_case ( self : Dict )-> Any:
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def snake_case ( self : Dict )-> int:
lowerCamelCase__ : int =BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
lowerCamelCase__ : str =AutoModelForCausalLM.from_pretrained(
self.model_name, quantization_config=_UpperCAmelCase, load_in_abit=_UpperCAmelCase, device_map='''auto''', bnb_abit_quant_type='''nf4''', )
def snake_case ( self : int )-> Optional[int]:
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase__ : str =self.tokenizer(self.input_text, return_tensors='''pt''' )
lowerCamelCase__ : str =self.model_fpaa.to(torch.floataa )
lowerCamelCase__ : int =self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
# Check this does not throw an error
lowerCamelCase__ : List[Any] =self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase__ : Tuple =self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase__ : Any =self.model_fpaa.float()
def snake_case ( self : List[Any] )-> str:
lowerCamelCase__ : int =AutoModelForSeqaSeqLM.from_pretrained('''t5-small''', load_in_abit=_UpperCAmelCase, device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def snake_case ( cls : Tuple )-> Dict:
lowerCamelCase__ : List[Any] ='''t5-small'''
lowerCamelCase__ : List[Any] ='''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase__ : List[str] =AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase__ : Any ='''Translate in German: Hello, my dog is cute'''
def snake_case ( self : List[Any] )-> Optional[int]:
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Tuple )-> List[Any]:
from transformers import TaForConditionalGeneration
lowerCamelCase__ : List[str] =TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase__ : Union[str, Any] =None
# test with `t5-small`
lowerCamelCase__ : Dict =TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
lowerCamelCase__ : Any =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
lowerCamelCase__ : str =model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
lowerCamelCase__ : int =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
lowerCamelCase__ : Any =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
lowerCamelCase__ : str =model.generate(**_UpperCAmelCase )
lowerCamelCase__ : List[str] =modules
def snake_case ( self : Dict )-> Optional[int]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase__ : int =TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit ) )
lowerCamelCase__ : str =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
lowerCamelCase__ : str =model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
lowerCamelCase__ : Optional[Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
lowerCamelCase__ : Optional[Any] =self.tokenizer(self.input_text, return_tensors='''pt''' ).to(0 )
lowerCamelCase__ : List[Any] =model.generate(**_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def snake_case ( self : Any )-> Optional[Any]:
super().setUp()
# model_name
lowerCamelCase__ : Optional[Any] ='''bigscience/bloom-560m'''
lowerCamelCase__ : int ='''t5-small'''
# Different types of model
lowerCamelCase__ : str =AutoModel.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
# Sequence classification model
lowerCamelCase__ : int =AutoModelForSequenceClassification.from_pretrained(
self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
# CausalLM model
lowerCamelCase__ : int =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
# Seq2seq model
lowerCamelCase__ : List[Any] =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name, load_in_abit=_UpperCAmelCase, device_map='''auto''' )
def snake_case ( self : int )-> str:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[Any] )-> List[Any]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def snake_case ( self : List[Any] )-> Optional[Any]:
super().setUp()
def snake_case ( self : Any )-> Optional[int]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Tuple )-> Union[str, Any]:
lowerCamelCase__ : List[str] =pipeline(
'''text-generation''', model=self.model_name, model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa}, max_new_tokens=self.MAX_NEW_TOKENS, )
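        # 4-bit loading is requested purely through model_kwargs, so the standard pipeline path is exercised unchanged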
# Real second forward pass
lowerCamelCase__ : Optional[int] =self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''], self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def snake_case ( self : Any )-> str:
super().setUp()
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Tuple =AutoModelForCausalLM.from_pretrained(
self.model_name, load_in_abit=_UpperCAmelCase, device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ), {0, 1} )
# Check that inference pass works on the model
lowerCamelCase__ : str =self.tokenizer(self.input_text, return_tensors='''pt''' )
# Second real batch
lowerCamelCase__ : str =model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ), max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=_UpperCAmelCase ), self.EXPECTED_OUTPUTS )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def snake_case ( self : List[str] )-> Optional[int]:
lowerCamelCase__ : Tuple ='''facebook/opt-350m'''
super().setUp()
def snake_case ( self : Dict )-> str:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase__ : Tuple =AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ), {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase__ : Optional[int] =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase__ : int =param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
lowerCamelCase__ : int =LoRALayer(module.q_proj, rank=16 )
lowerCamelCase__ : List[str] =LoRALayer(module.k_proj, rank=16 )
lowerCamelCase__ : Optional[Any] =LoRALayer(module.v_proj, rank=16 )
# Step 3: dummy batch
lowerCamelCase__ : int =self.tokenizer('''Test batch ''', return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase__ : List[str] =model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase, nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 352
|
"""simple docstring"""
def longest_distance(graph ):
    """Print the length of the longest path in a DAG, found via Kahn's topological sort."""
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            # relax the edge: a longer path to x has been found through vertex
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 272
| 0
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __UpperCamelCase ( lowerCamelCase__ ):
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowerCamelCase_ ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowerCamelCase_ ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
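        # the stub replaces socket.socket so that any attempted network connection raises immediately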
# Force fetching the files so that we can use the cache
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase )
BertModel.from_pretrained(lowerCAmelCase )
BertTokenizer.from_pretrained(lowerCAmelCase )
pipeline(task='''fill-mask''', model=lowerCAmelCase )
# baseline - just load from_pretrained with normal network
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowerCamelCase_ =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase_ ='''1'''
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowerCamelCase_ ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowerCamelCase_ ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(lowerCAmelCase )
BertModel.from_pretrained(lowerCAmelCase )
BertTokenizer.from_pretrained(lowerCAmelCase )
pipeline(task='''fill-mask''', model=lowerCAmelCase )
# baseline - just load from_pretrained with normal network
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowerCamelCase_ =self.get_env()
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowerCamelCase_ ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowerCamelCase_ ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowerCamelCase_ =self.get_env()
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
# next emulate no network
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase_ ='''1'''
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''
from transformers import pipeline
'''
lowerCamelCase_ ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowerCamelCase_ ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowerCamelCase_ =self.get_env()
lowerCamelCase_ ='''1'''
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 1, result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''', result.stderr.decode().replace('''\n''', '''''' ), )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''
from transformers import AutoModel
'''
lowerCamelCase_ ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowerCamelCase_ =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowerCamelCase_ =self.get_env()
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowerCamelCase_ ='''1'''
lowerCamelCase_ =subprocess.run(lowerCAmelCase, env=lowerCAmelCase, check=lowerCAmelCase, capture_output=lowerCAmelCase )
self.assertEqual(result.returncode, 0, result.stderr )
self.assertIn('''success''', result.stdout.decode() )
| 75
|
'''simple docstring'''
def apply_table(inp , table ):
    """Apply a permutation table to the input bit string."""
    res = ''''''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data ):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a , b ):
    """Bitwise XOR of two equal-length bit strings."""
    res = ''''''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s , data ):
    """Look up an S-box: the outer bits select the row, the inner bits the column."""
    row = int('''0b''' + data[0] + data[-1] , 2 )
    col = int('''0b''' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]


def function(expansion , s0 , s1 , key , message ):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '''0''' * (2 - len(l )) + l  # noqa: E741
    r = '''0''' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right


if __name__ == "__main__":
    key = input("""Enter 10 bit key: """)
    message = input("""Enter 8 bit message: """)

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("""Cipher text is:""", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("""Plain text after decrypting is:""", PT)
| 75
| 1
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : str ):
"""simple docstring"""
if os.path.exists(_lowerCAmelCase ):
if os.path.exists(os.path.join(_lowerCAmelCase, '''config.json''' ) ) and os.path.isfile(
os.path.join(_lowerCAmelCase, '''config.json''' ) ):
os.remove(os.path.join(_lowerCAmelCase, '''config.json''' ) )
if os.path.exists(os.path.join(_lowerCAmelCase, '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(_lowerCAmelCase, '''pytorch_model.bin''' ) ):
os.remove(os.path.join(_lowerCAmelCase, '''pytorch_model.bin''' ) )
else:
os.makedirs(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict=False ):
"""simple docstring"""
_a = 2
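    # Shannon entropy: -sum(p * log p) along the last dimension (optionally squaring the scores first)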
if unlogit:
_a = torch.pow(_lowerCAmelCase, _lowerCAmelCase )
_a = p * torch.log(_lowerCAmelCase )
_a = 0
return -plogp.sum(dim=-1 )
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(f'{x + 1}' for x in range(len(_lowerCAmelCase ) ) ) )
for row in range(len(_lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(f'layer {row + 1}:\t' + '''\t'''.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : List[str], _lowerCAmelCase : List[str], _lowerCAmelCase : Any=True, _lowerCAmelCase : int=True, _lowerCAmelCase : Any=None, _lowerCAmelCase : str=False ):
"""simple docstring"""
_a , _a = model.config.num_hidden_layers, model.config.num_attention_heads
_a = torch.zeros(_lowerCAmelCase, _lowerCAmelCase ).to(args.device )
_a = torch.zeros(_lowerCAmelCase, _lowerCAmelCase ).to(args.device )
if head_mask is None:
_a = torch.ones(_lowerCAmelCase, _lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_a = None
_a = 0.0
_a = 0.0
for step, inputs in enumerate(tqdm(_lowerCAmelCase, desc='''Iteration''', disable=args.local_rank not in [-1, 0] ) ):
_a = tuple(t.to(args.device ) for t in inputs )
((_a) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_a = model(_lowerCAmelCase, labels=_lowerCAmelCase, head_mask=_lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_a , _a , _a = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCAmelCase ):
_a = entropy(attn.detach(), _lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
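        # divide each layer's head scores by that layer's L2 norm so importance is comparable across layers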
_a = 2
_a = torch.pow(torch.pow(_lowerCAmelCase, _lowerCAmelCase ).sum(-1 ), 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
_a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_lowerCAmelCase )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_lowerCAmelCase )
logger.info('''Head ranked by importance scores''' )
_a = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device )
_a = torch.arange(
head_importance.numel(), device=args.device )
_a = head_ranks.view_as(_lowerCAmelCase )
print_ad_tensor(_lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : str, _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a , _a , _a = compute_heads_importance(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, compute_entropy=_lowerCAmelCase )
_a = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''', _lowerCAmelCase, original_score * args.masking_threshold )
_a = torch.ones_like(_lowerCAmelCase )
_a = max(1, int(new_head_mask.numel() * args.masking_amount ) )
_a = original_score
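    # greedily mask the least-important heads, re-scoring after each round, until the metric drops below the threshold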
while current_score >= original_score * args.masking_threshold:
_a = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_a = float('''Inf''' )
_a = head_importance.view(-1 ).sort()[1]
if len(_lowerCAmelCase ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_a = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist() ) )
_a = new_head_mask.view(-1 )
_a = 0.0
_a = new_head_mask.view_as(_lowerCAmelCase )
_a = new_head_mask.clone().detach()
print_ad_tensor(_lowerCAmelCase )
# Compute metric and head importance again
_a , _a , _a = compute_heads_importance(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, compute_entropy=_lowerCAmelCase, head_mask=_lowerCAmelCase )
_a = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''', _lowerCAmelCase, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 1_00, )
logger.info('''Final head mask''' )
print_ad_tensor(_lowerCAmelCase )
np.save(os.path.join(args.output_dir, '''head_mask.npy''' ), head_mask.detach().cpu().numpy() )
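    # persist the final mask so the pruning step can be replayed without recomputing importance scores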
return head_mask
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : List[str], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : str ):
"""simple docstring"""
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, compute_entropy=_lowerCAmelCase, compute_importance=_lowerCAmelCase, head_mask=_lowerCAmelCase )
_a = 1 / loss
_a = datetime.now() - before_time
_a = sum(p.numel() for p in model.parameters() )
_a = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCAmelCase, _lowerCAmelCase ):
_a = [
v,
]
assert sum(len(_lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCAmelCase )
_a = sum(p.numel() for p in model.parameters() )
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, compute_entropy=_lowerCAmelCase, compute_importance=_lowerCAmelCase, head_mask=_lowerCAmelCase, actually_pruned=_lowerCAmelCase, )
_a = 1 / loss
_a = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', _lowerCAmelCase, _lowerCAmelCase, pruned_num_params / original_num_params * 1_00, )
logger.info('''Pruning: score with masking: %f score with pruning: %f''', _lowerCAmelCase, _lowerCAmelCase )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 1_00 )
save_model(_lowerCAmelCase, args.output_dir )
def A_ ( ):
"""simple docstring"""
_a = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '''--data_dir''', default=None, type=str, required=True, help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''', )
    parser.add_argument(
        '''--model_name_or_path''', default=None, type=str, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model predictions and checkpoints will be written.''', )
    # Other parameters
    parser.add_argument(
        '''--config_name''', default='''''', type=str, help='''Pretrained config name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--tokenizer_name''', default='''''', type=str, help='''Pretrained tokenizer name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--cache_dir''', default=None, type=str, help='''Where do you want to store the pre-trained models downloaded from s3''', )
    parser.add_argument(
        '''--data_subset''', type=int, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.''' )
    parser.add_argument(
        '''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory''' )
    parser.add_argument(
        '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''' )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers''' )
    parser.add_argument(
        '''--dont_normalize_global_importance''', action='''store_true''', help='''Don\'t normalize all importance scores between 0 and 1''', )
    parser.add_argument(
        '''--try_masking''', action='''store_true''', help='''Whether to try to mask heads until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''', default=0.9, type=float, help='''masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).''', )
    parser.add_argument(
        '''--masking_amount''', default=0.1, type=float, help='''Amount of heads to mask at each masking step.''' )
    parser.add_argument('''--metric_name''', default='''acc''', type=str, help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''', default=128, type=int, help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ), )
    parser.add_argument('''--batch_size''', default=1, type=int, help='''Batch size.''' )
    parser.add_argument('''--seed''', type=int, default=42 )
    parser.add_argument('''--local_rank''', type=int, default=-1, help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether to avoid using CUDA even when it is available''' )
    parser.add_argument('''--server_ip''', type=str, default='''''', help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''', type=str, default='''''', help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True )
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('''cuda''', args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True )
    torch.save(args, os.path.join(args.output_dir, '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''', args )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size )
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader )
        prune_heads(args, model, eval_dataloader, head_mask )
if __name__ == "__main__":
main()
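# Example invocation (a sketch only: the data path and output directory are
# hypothetical placeholders, and the script file name is assumed, not given here):
#
#   python run_prune_gpt.py \
#       --data_dir ./data/wiki_tokens.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9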
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    '''simple docstring'''

    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    '''simple docstring'''

    def __init__( self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write(self ) -> int:
        _ = self.to_json_kwargs.pop('''path_or_buf''' , None )
        orient = self.to_json_kwargs.pop('''orient''' , '''records''' )
        lines = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        index = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        compression = self.to_json_kwargs.pop('''compression''' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ''' was passed. Please provide a local path instead.''' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json(self , args ):
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs , ) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(json_str )
        return written
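# A minimal usage sketch for the reader/writer pair above (the file names are
# hypothetical; in `datasets`, Dataset.from_json / Dataset.to_json wrap these classes):
#
#   ds = JsonDatasetReader("data.jsonl", split="train").read()
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()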
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase ):
    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self ):
        '''simple docstring'''
        return 12

    @property
    def num_embeds_ada_norm(self ):
        '''simple docstring'''
        return 12

    @property
    def text_embedder_hidden_size(self ):
        '''simple docstring'''
        return 32

    @property
    def dummy_vqvae(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,num_vq_embeddings=self.num_embed ,vq_embed_dim=3 ,)
return model
    @property
    def dummy_tokenizer(self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def dummy_text_encoder(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        return CLIPTextModel(config )
    @property
    def dummy_transformer(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion(self ):
        '''simple docstring'''
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae ,text_encoder=text_encoder ,tokenizer=tokenizer ,transformer=transformer ,scheduler=scheduler ,learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings ,)
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] ,generator=generator ,num_inference_steps=2 ,output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] ,generator=generator ,output_type='np' ,return_dict=False ,num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self ):
        '''simple docstring'''
        device = 'cpu'
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True ,hidden_size=self.text_embedder_hidden_size ,length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae ,text_encoder=text_encoder ,tokenizer=tokenizer ,transformer=transformer ,scheduler=scheduler ,learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings ,)
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'teddy bear playing in the pool'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] ,generator=generator ,num_inference_steps=2 ,output_type='np' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] ,generator=generator ,output_type='np' ,return_dict=False ,num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self ):
        '''simple docstring'''
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
        pipeline = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            'teddy bear playing in the pool' ,num_images_per_prompt=1 ,generator=generator ,output_type='np' ,)
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
"""simple docstring"""
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
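# Typical consumer-side usage of these re-exports (a sketch; the URL and cache
# directory below are placeholders, not taken from this module):
#
#   from datasets.download import DownloadConfig, DownloadManager
#   dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir="./cache"))
#   local_path = dl_manager.download("https://example.com/archive.zip")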
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self ):
        """simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        outputs = model(input_ids )
        outputs = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        result = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice(self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["""start_positions"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["""end_positions"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ):
        """simple docstring"""
        self.model_tester = XLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XLMConfig , emb_dim=37 )

    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_xlm_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs )

    def test_xlm_lm_head(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs )

    def test_xlm_simple_qa(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs )

    def test_xlm_qa(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs )

    def test_xlm_sequence_classif(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs )

    def test_xlm_token_classif(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs )

    def test_xlm_for_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs )
    def _check_attentions_for_generate(
        self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        """simple docstring"""
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def _check_hidden_states_for_generate(
        self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        """simple docstring"""
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self ):
        """simple docstring"""
        model = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
        model.to(torch_device )
        input_ids = torch.tensor([[14, 447]] , dtype=torch.long , device=torch_device )  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , expected_output_ids )
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path , targets ):
    selected_warnings = set()
    buffer = []

    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings(artifact_dir , targets ):
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str(values ):
        return values.split(""",""" )

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
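# Example invocation (a sketch only: the run id and token are placeholders, and
# the script file name below is an assumption, not stated in this file):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./ci_artifacts \
#       --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning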
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
])
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)


def rename_key(dct , old , new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename) , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config , base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config , base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image , return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1E-4)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
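# Example invocation (a sketch only: the output directory is a placeholder, and
# the script file name below is an assumption, not stated in this file):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small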
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
    """processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_trocr"""] = [
        """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TrOCRForCausalLM""",
        """TrOCRPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
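# With the lazy module installed in sys.modules, downstream imports resolve only
# on first attribute access (a sketch of typical consumer usage):
#
#   from transformers import TrOCRProcessor, TrOCRForCausalLM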
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """simple docstring"""

    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self ):
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def get_config(self ):
        """simple docstring"""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )

    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ):
        """simple docstring"""
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
"""simple docstring"""
return
    def test_forward_signature(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def test_inputs_embeds(self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def test_model_common_attributes(self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_from_base(self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_to_base(self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self ):
"""simple docstring"""
pass
    def test_hidden_states_output(self ):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
    def test_tied_model_weights_key_ignore(self ):
"""simple docstring"""
pass
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
    image = Image.open(filepath ).convert("""RGB""" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
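# A hypothetical helper (not used by the tests above, added for illustration): given
# logits shaped (batch_size, num_labels, height, width), as asserted in the tests, the
# per-pixel segmentation map is the argmax over the label dimension.
def logits_to_segmentation_map(logits):
    return logits.argmax(dim=1)  # (batch_size, height, width) of integer class ids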
| 69
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
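def _funnel_config_demo():
    # A quick usage sketch (illustration only, not part of the original module): the
    # derived properties come from block_sizes, so with the default [4, 4, 4] the
    # config reports 12 hidden layers spread across 3 blocks.
    config = FunnelConfig(block_sizes=[4, 4, 4])
    assert config.num_hidden_layers == 12
    assert config.num_blocks == 3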
| 69
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
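def _smoke_test_prior():
    # A minimal shape sketch (hypothetical sizes, illustration only): with the default
    # "prd" token, the prior returns one embedding per batch element whose width is
    # clip_embed_dim (here it falls back to embedding_dim).
    prior = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=1, embedding_dim=8, num_embeddings=3
    )
    sample = torch.randn(1, 1, 8)
    proj_embedding = torch.randn(1, 8)
    encoder_hidden_states = torch.randn(1, 3, 8)
    out = prior(sample, timestep=1, proj_embedding=proj_embedding, encoder_hidden_states=encoder_hidden_states)
    assert out.predicted_image_embedding.shape == (1, 8)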
| 302
| 1
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
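def _first_primes_demo(count: int = 5) -> list:
    # A small usage sketch (illustration only): the generator yields primes in
    # ascending order, so itertools.islice can take the first `count` without
    # picking an upper bound in advance.
    return list(itertools.islice(prime_generator(), count))  # count=5 -> [2, 3, 5, 7, 11]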
if __name__ == "__main__":
print(f'{solution() = }')
| 155
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None

                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
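# Example invocation (a sketch only; the script name and paths are placeholders),
# using the flags defined above:
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec-24khz-converted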
| 155
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
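def _resnet_config_demo():
    # A short usage sketch (illustration only, not part of the original module):
    # stage names are derived from `depths`, and `out_features` selects which of
    # those stages a backbone should return.
    config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]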
| 252
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 252
| 1
|
from math import asin, atan, cos, radians, sin, sqrt, tan
UpperCAmelCase__ = 6378137.0
UpperCAmelCase__ = 6356752.314245
UpperCAmelCase__ = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters (WGS84 ellipsoid)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
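def _haversine_demo() -> None:
    # A rough usage sketch (coordinates are approximate): San Francisco to New York.
    # The result is in metres because RADIUS above is given in metres.
    meters = haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
    print(f"San Francisco to New York: {meters / 1000:.0f} km")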
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
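# A usage sketch (hypothetical; requires a d4rl-style env exposing get_dataset(),
# observation_space and action_space, plus pretrained value/diffusion UNets):
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   denorm_action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)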
| 26
| 0
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
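def _gaussian_demo() -> None:
    # A quick numeric check (illustration only): the standard normal density at
    # x = 0 is 1 / sqrt(2 * pi), roughly 0.3989.
    assert abs(gaussian(0) - 1 / sqrt(2 * pi)) < 1e-12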
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
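# For reference, a (hypothetical) input `handle_test_results` can parse:
# "== 2 failed, 10 passed in 1:02:13 ==" -> failed=2, success=10, time_spent="1:02:13".
# The ints preceding "failed"/"passed" are summed, and the token before the closing
# "==" is taken as the time spent.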
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # set by post(); post_reply() checks it before replying in-thread
        self.thread_ts = None
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_job_links()
SCREAMING_SNAKE_CASE__ = retrieve_available_artifacts()
SCREAMING_SNAKE_CASE__ = collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
SCREAMING_SNAKE_CASE__ = {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
SCREAMING_SNAKE_CASE__ = github_actions_job_links.get("run_doctests")
SCREAMING_SNAKE_CASE__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
SCREAMING_SNAKE_CASE__ = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = handle_test_results(artifact["stats"])
SCREAMING_SNAKE_CASE__ = failed
SCREAMING_SNAKE_CASE__ = success
SCREAMING_SNAKE_CASE__ = time_spent[1:-1] + ", "
SCREAMING_SNAKE_CASE__ = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
SCREAMING_SNAKE_CASE__ = line.replace("FAILED ", "")
SCREAMING_SNAKE_CASE__ = line.split()[0].replace("\n", "")
if "::" in line:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line.split("::")
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
SCREAMING_SNAKE_CASE__ = docs[file_regex]
doc_test_results[category]["failed"].append(test)
SCREAMING_SNAKE_CASE__ = all_failures[test] if test in all_failures else "N/A"
SCREAMING_SNAKE_CASE__ = failure
break
SCREAMING_SNAKE_CASE__ = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
| 150
| 0
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
):
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
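def _monte_carlo_demo(iterations: int = 100_000) -> None:
    # A small driver sketch (illustration only): each estimator prints its own
    # report, and all three get more accurate on average as `iterations` grows.
    pi_estimator(iterations)
    area_under_line_estimator_check(iterations)
    pi_estimator_using_area_under_curve(iterations)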
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
def a ( self : List[str] , _lowercase : "PreTrainedTokenizer" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional["TensorType"] = None , ):
__UpperCAmelCase = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__UpperCAmelCase , __UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCAmelCase = seqlen + 2
__UpperCAmelCase = self._config.hidden_size // self.num_attention_heads
__UpperCAmelCase = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__UpperCAmelCase = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__UpperCAmelCase = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
__UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
__UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
__UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
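def _bloom_config_demo():
    # A short usage sketch (illustration only, not part of the original module):
    # `n_embed` is accepted as a legacy alias for `hidden_size` (see the kwargs.pop
    # in __init__), and attribute_map exposes n_layer as num_hidden_layers.
    config = BloomConfig(n_embed=128, n_layer=4)
    assert config.hidden_size == 128
    assert config.num_hidden_layers == 4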
| 86
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase : int = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 115
|
"""simple docstring"""
def partition ( m ) ->int:
    """simple docstring"""
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
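# --- Illustrative check added by the editor: with the fixed partition function above,
# partition(m) returns the partition number p(m); p(1)..p(7) = 1, 2, 3, 5, 7, 11, 15.
def _demo_partition_check():
    expected = {1: 1, 2: 2, 3: 3, 4: 5, 5: 7, 6: 11, 7: 15}
    for m, p_m in expected.items():
        assert partition(m) == p_m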
| 243
| 0
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args , **kwargs ):
            pass
def hashimage ( image )-> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _snake_case ( self ,a_ ,a_ ,a_ ) -> List[Any]:
_UpperCAmelCase : Optional[int] = DepthEstimationPipeline(model=a_ ,image_processor=a_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def _snake_case ( self ,depth_estimator ,examples ) -> Optional[Any]:
        outputs = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} ,outputs )
        import datasets
        dataset = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" ,"""image""" ,split="""test""" )
        outputs = depth_estimator(
[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
] )
self.assertEqual(
[
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
{"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ] ,outputs ,)
@require_tf
@unittest.skip("""Depth estimation is not implemented in TF""" )
def _snake_case ( self ) -> str:
pass
@slow
@require_torch
def _snake_case ( self ) -> Tuple:
        model_id = """Intel/dpt-large"""
        depth_estimator = pipeline("""depth-estimation""" ,model=model_id )
        outputs = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
        outputs["""depth"""] = hashimage(outputs["""depth"""] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) ,29.304 )
self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) ,2.662 )
@require_torch
def _snake_case ( self ) -> str:
# This is highly irregular to have no small tests.
self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
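# --- Illustrative sketch added by the editor, not part of the original tests: the
# minimal pipeline usage the slow test above exercises. Running it needs PyTorch, the
# DPT weights, and network access; the model id is taken from that test.
def _demo_depth_estimation():
    from transformers import pipeline
    depth_estimator = pipeline("depth-estimation" , model="Intel/dpt-large" )
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
    # outputs["depth"] is a PIL image; outputs["predicted_depth"] is a torch.Tensor.
    print(outputs["predicted_depth"].shape )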
| 349
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted ( self ) -> Optional[Any]:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit ,weight ,max_weight ) ,210 )
    def test_negative_max_weight ( self ) -> Union[str, Any]:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_negative_weight_value ( self ) -> Any:
        self.assertRaisesRegex(ValueError ,"""Weight can not be negative.""" )
    def test_negative_profit_value ( self ) -> Optional[Any]:
        self.assertRaisesRegex(ValueError ,"""Profit can not be negative.""" )
    def test_null_max_weight ( self ) -> Dict:
        self.assertRaisesRegex(ValueError ,"""max_weight must greater than zero.""" )
    def test_unequal_list_length ( self ) -> Tuple:
        self.assertRaisesRegex(
            ValueError ,"""The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
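# --- Illustrative sketch added by the editor: the greedy-knapsack call the first test
# above exercises, assuming kp.calc_profit(profit, weight, max_weight) returns the best
# achievable profit for the given capacity.
def _demo_greedy_knapsack():
    profit = [10, 20, 30, 40, 50, 60]
    weight = [2, 4, 6, 8, 10, 12]
    assert kp.calc_profit(profit , weight , 100 ) == 210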
| 349
| 1
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = """src/transformers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(r"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(r"""^\s*else:""")
def find_backend ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )]
    backends.sort()
    return "_and_".join(backends )
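# --- Illustrative checks added by the editor: a line guarded by `if not is_xxx_available()`
# maps to the backend key "xxx"; any other line maps to None.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("import os") is None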
def parse_init ( _SCREAMING_SNAKE_CASE : str ):
    """simple docstring"""
    with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    line_index = 0
while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__a = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
__a = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ):
__a = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0]
            __a = re.findall(r"""\[([^\]]+)\]""" , _SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
__a = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__a = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
__a = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__a = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__a = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__a = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
__a = lines[line_index]
if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None:
__a = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
__a = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None:
__a = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
__a = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
__a = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__a = []
while (
line_index < len(_SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
__a = lines[line_index]
__a = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
__a = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__a = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__a = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__a = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
__a = lines[line_index]
__a = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
__a = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results ( import_dict_objects , type_hint_objects ):
"""simple docstring"""
def find_duplicates(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = """base imports""" if key == """none""" else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def check_all_inits ( ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules ( ):
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , """.""" )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules ( ):
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        """transformers""" , os.path.join(PATH_TO_TRANSFORMERS , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
    if len(module_not_registered ) > 0:
        list_of_modules = """\n""".join(f"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 302
|
import string
import numpy
def greatest_common_divisor ( a : int , b : int ):
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a , a )
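# Editor's sanity checks (added): the helper above is Euclid's algorithm.
assert greatest_common_divisor(4 , 6 ) == 2
assert greatest_common_divisor(7 , 36 ) == 1  # co-prime, as the cipher's key check requires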
class HillCipher :
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self : Union[str, Any] , encrypt_key : numpy.ndarray ):
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters ( self : Dict , letter : str ):
        '''simple docstring'''
        return self.key_string.index(letter )
    def replace_digits ( self : Dict , num : int ):
        '''simple docstring'''
        return self.key_string[round(num )]
    def check_determinant ( self : List[Any] ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg )
    def process_text ( self : Dict , text : str ):
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt ( self : List[str] , text : str ):
        '''simple docstring'''
        text = self.process_text(text.upper() )
        encrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[0]
            encrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key ( self : Optional[Any] ):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )
    def decrypt ( self : Any , text : str ):
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ""
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main ( ):
    """simple docstring"""
    n = int(input("""Enter the order of the encryption key: """ ) )
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""" )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
    option = input("""\n1. Encrypt\n2. Decrypt\n""" )
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """ )
        print("""Your encrypted text is:""" )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """ )
        print("""Your decrypted text is:""" )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
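# --- Illustrative round trip added by the editor: the classic 2x2 key [[2, 5], [1, 6]]
# has determinant 7, co-prime with 36, so it passes check_determinant. The exact
# ciphertext depends on the padding rule in process_text, so only the round trip is
# asserted.
def _demo_hill_round_trip():
    hc = HillCipher(numpy.array([[2, 5], [1, 6]] ) )
    ciphertext = hc.encrypt("HELLOHILLCIPHER" )
    assert hc.decrypt(ciphertext ).startswith("HELLOHILLCIPHER" )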
| 302
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : str = '''gpt_bigcode'''
lowerCamelCase_ : Any = ['''past_key_values''']
lowerCamelCase_ : Union[str, Any] = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , __magic_name__=5_0257 , __magic_name__=1024 , __magic_name__=768 , __magic_name__=12 , __magic_name__=12 , __magic_name__=None , __magic_name__="gelu_pytorch_tanh" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=5_0256 , __magic_name__=5_0256 , __magic_name__=True , __magic_name__=True , __magic_name__=True , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = vocab_size
snake_case_ : Dict = n_positions
snake_case_ : Union[str, Any] = n_embd
snake_case_ : Tuple = n_layer
snake_case_ : Optional[int] = n_head
snake_case_ : int = n_inner
snake_case_ : Optional[Any] = activation_function
snake_case_ : Dict = resid_pdrop
snake_case_ : Optional[int] = embd_pdrop
snake_case_ : Tuple = attn_pdrop
snake_case_ : str = layer_norm_epsilon
snake_case_ : Tuple = initializer_range
snake_case_ : Optional[Any] = scale_attn_weights
snake_case_ : Optional[int] = use_cache
snake_case_ : List[str] = attention_softmax_in_fpaa
snake_case_ : Optional[Any] = scale_attention_softmax_in_fpaa
snake_case_ : Dict = multi_query
snake_case_ : List[str] = bos_token_id
snake_case_ : Tuple = eos_token_id
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
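# --- Illustrative usage added by the editor (a sketch, since the config class above is
# bound to an obfuscated name; conventionally it is GPTBigCodeConfig):
#   config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
#   config.hidden_size  # -> 768, routed through the attribute_map defined above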
| 279
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 279
| 1
|
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir , dest_dir , n ):
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
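# --- Illustrative check added by the editor: exercising minify on a throwaway directory
# (the scratch path below is hypothetical).
def _demo_minify(tmp_root="/tmp/minify_demo"):
    src = Path(tmp_root) / "src"
    src.mkdir(parents=True, exist_ok=True)
    (src / "a.txt").write_text("line1\nline2\nline3\n")
    minify(str(src), str(Path(tmp_root) / "dest"), 2)
    assert (Path(tmp_root) / "dest" / "a.txt").read_text() == "line1\nline2"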
| 174
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args ):
'''simple docstring'''
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main():
'''simple docstring'''
_lowerCAmelCase = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
TestCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Parse args
_lowerCAmelCase , _lowerCAmelCase = parser.parse_known_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , "func" ):
parser.print_help()
exit(1 )
_lowerCAmelCase = parse_unknown_args(SCREAMING_SNAKE_CASE_ )
# Run
_lowerCAmelCase = args.func(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 158
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class a__ ( __snake_case , __snake_case ):
A__ : Optional[Any] = 'bit'
A__ : Dict = ['preactivation', 'bottleneck']
A__ : Dict = ['SAME', 'VALID']
def __init__( self , UpperCAmelCase=3 , UpperCAmelCase=6_4 , UpperCAmelCase=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCAmelCase=[3, 4, 6, 3] , UpperCAmelCase="preactivation" , UpperCAmelCase="relu" , UpperCAmelCase=None , UpperCAmelCase=3_2 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=3_2 , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__a = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__a = num_channels
__a = embedding_size
__a = hidden_sizes
__a = depths
__a = layer_type
__a = hidden_act
__a = global_padding
__a = num_groups
__a = drop_path_rate
__a = embedding_dynamic_padding
__a = output_stride
__a = width_factor
__a = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase ) + 1 )]
__a , __a = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase , out_indices=UpperCAmelCase , stage_names=self.stage_names )
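# --- Illustrative usage added by the editor (a sketch; the class above is bound to an
# obfuscated name, conventionally BitConfig):
#   config = BitConfig(layer_type="preactivation", global_padding="SAME",
#                      out_features=["stage1", "stage4"])
#   config.out_features  # aligned against stage_names by get_aligned_output_features_output_indices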
| 360
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class a__ ( __snake_case ):
A__ : Any = 'Wav2Vec2FeatureExtractor'
A__ : str = 'AutoTokenizer'
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
super().__init__(UpperCAmelCase , UpperCAmelCase )
__a = self.feature_extractor
__a = False
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
try:
return super().from_pretrained(UpperCAmelCase , **UpperCAmelCase )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , UpperCAmelCase , )
__a = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
__a = WavaVecaCTCTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
return cls(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
def __call__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase , **UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__a = kwargs.pop('raw_speech' )
else:
__a = kwargs.pop('audio' , UpperCAmelCase )
__a = kwargs.pop('sampling_rate' , UpperCAmelCase )
__a = kwargs.pop('text' , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__a = self.feature_extractor(UpperCAmelCase , *UpperCAmelCase , sampling_rate=UpperCAmelCase , **UpperCAmelCase )
if text is not None:
__a = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__a = encodings['input_ids']
return inputs
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCAmelCase , **UpperCAmelCase )
__a = kwargs.pop('input_features' , UpperCAmelCase )
__a = kwargs.pop('labels' , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
__a = args[0]
__a = args[1:]
if input_features is not None:
__a = self.feature_extractor.pad(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
if labels is not None:
__a = self.tokenizer.pad(UpperCAmelCase , **UpperCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__a = labels['input_ids']
return input_features
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
__a = True
__a = self.tokenizer
yield
__a = self.feature_extractor
__a = False
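# --- Illustrative usage added by the editor: the call pattern the deprecation warning
# above recommends instead of as_target_processor(). A sketch; the model id and the
# raw_audio array are hypothetical.
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=raw_audio, sampling_rate=16_000, text="A TRANSCRIPTION")
#   batch["input_values"]  # audio features from the feature extractor
#   batch["labels"]        # tokenized text ids, merged in by __call__ above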
| 197
| 0
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( _a , _a ):
@register_to_config
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int = 128 ,SCREAMING_SNAKE_CASE__ : int = 256 ,SCREAMING_SNAKE_CASE__ : float = 2_000.0 ,SCREAMING_SNAKE_CASE__ : int = 768 ,SCREAMING_SNAKE_CASE__ : int = 12 ,SCREAMING_SNAKE_CASE__ : int = 12 ,SCREAMING_SNAKE_CASE__ : int = 64 ,SCREAMING_SNAKE_CASE__ : int = 2_048 ,SCREAMING_SNAKE_CASE__ : float = 0.1 ,):
super().__init__()
SCREAMING_SNAKE_CASE:Any = nn.Sequential(
nn.Linear(SCREAMING_SNAKE_CASE__ ,d_model * 4 ,bias=SCREAMING_SNAKE_CASE__ ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=SCREAMING_SNAKE_CASE__ ) ,nn.SiLU() ,)
SCREAMING_SNAKE_CASE:str = nn.Embedding(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = False
SCREAMING_SNAKE_CASE:str = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = nn.ModuleList()
for lyr_num in range(SCREAMING_SNAKE_CASE__ ):
# FiLM conditional T5 decoder
SCREAMING_SNAKE_CASE:Tuple = DecoderLayer(d_model=SCREAMING_SNAKE_CASE__ ,d_kv=SCREAMING_SNAKE_CASE__ ,num_heads=SCREAMING_SNAKE_CASE__ ,d_ff=SCREAMING_SNAKE_CASE__ ,dropout_rate=SCREAMING_SNAKE_CASE__ )
self.decoders.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = nn.Dropout(p=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:int = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
SCREAMING_SNAKE_CASE:Optional[int] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
SCREAMING_SNAKE_CASE:Optional[int] = self.conditioning_emb(SCREAMING_SNAKE_CASE__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
SCREAMING_SNAKE_CASE:List[Any] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
SCREAMING_SNAKE_CASE:List[Any] = torch.broadcast_to(
torch.arange(SCREAMING_SNAKE_CASE__ ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
SCREAMING_SNAKE_CASE:List[Any] = self.position_encoding(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = self.continuous_inputs_projection(SCREAMING_SNAKE_CASE__ )
inputs += position_encodings
SCREAMING_SNAKE_CASE:List[Any] = self.dropout(SCREAMING_SNAKE_CASE__ )
# decoder: No padding present.
SCREAMING_SNAKE_CASE:Optional[Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
SCREAMING_SNAKE_CASE:List[str] = [(x, self.encoder_decoder_mask(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
SCREAMING_SNAKE_CASE:List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
SCREAMING_SNAKE_CASE:Optional[int] = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
SCREAMING_SNAKE_CASE:str = lyr(
SCREAMING_SNAKE_CASE__ ,conditioning_emb=SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,encoder_attention_mask=SCREAMING_SNAKE_CASE__ ,)[0]
SCREAMING_SNAKE_CASE:str = self.decoder_norm(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = self.post_dropout(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = self.spec_out(SCREAMING_SNAKE_CASE__ )
return spec_out
class _snake_case ( nn.Module ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple=1e-6 ):
super().__init__()
SCREAMING_SNAKE_CASE:Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=SCREAMING_SNAKE_CASE__ ,d_kv=SCREAMING_SNAKE_CASE__ ,num_heads=SCREAMING_SNAKE_CASE__ ,dropout_rate=SCREAMING_SNAKE_CASE__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=SCREAMING_SNAKE_CASE__ ,d_kv=SCREAMING_SNAKE_CASE__ ,num_heads=SCREAMING_SNAKE_CASE__ ,dropout_rate=SCREAMING_SNAKE_CASE__ ,layer_norm_epsilon=SCREAMING_SNAKE_CASE__ ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=SCREAMING_SNAKE_CASE__ ,d_ff=SCREAMING_SNAKE_CASE__ ,dropout_rate=SCREAMING_SNAKE_CASE__ ,layer_norm_epsilon=SCREAMING_SNAKE_CASE__ ) )
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.layer[0](
SCREAMING_SNAKE_CASE__ ,conditioning_emb=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,)
if encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE:Any = torch.where(encoder_attention_mask > 0 ,0 ,-1e10 ).to(
encoder_hidden_states.dtype )
SCREAMING_SNAKE_CASE:Tuple = self.layer[1](
SCREAMING_SNAKE_CASE__ ,key_value_states=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,)
# Apply Film Conditional Feed Forward layer
SCREAMING_SNAKE_CASE:Union[str, Any] = self.layer[-1](SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
return (hidden_states,)
class _snake_case ( nn.Module ):
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE:str = TaLayerNorm(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = Attention(query_dim=SCREAMING_SNAKE_CASE__ ,heads=SCREAMING_SNAKE_CASE__ ,dim_head=SCREAMING_SNAKE_CASE__ ,out_bias=SCREAMING_SNAKE_CASE__ ,scale_qk=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = nn.Dropout(SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,):
# pre_self_attention_layer_norm
SCREAMING_SNAKE_CASE:str = self.layer_norm(SCREAMING_SNAKE_CASE__ )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE:List[str] = self.FiLMLayer(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Self-attention block
SCREAMING_SNAKE_CASE:Tuple = self.attention(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE:Tuple = Attention(query_dim=SCREAMING_SNAKE_CASE__ ,heads=SCREAMING_SNAKE_CASE__ ,dim_head=SCREAMING_SNAKE_CASE__ ,out_bias=SCREAMING_SNAKE_CASE__ ,scale_qk=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = TaLayerNorm(SCREAMING_SNAKE_CASE__ ,eps=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = nn.Dropout(SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,):
SCREAMING_SNAKE_CASE:str = self.layer_norm(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = self.attention(
SCREAMING_SNAKE_CASE__ ,encoder_hidden_states=SCREAMING_SNAKE_CASE__ ,attention_mask=attention_mask.squeeze(1 ) ,)
SCREAMING_SNAKE_CASE:Union[str, Any] = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ )
return layer_output
class _snake_case ( nn.Module ):
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
super().__init__()
SCREAMING_SNAKE_CASE:str = TaDenseGatedActDense(d_model=SCREAMING_SNAKE_CASE__ ,d_ff=SCREAMING_SNAKE_CASE__ ,dropout_rate=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = TaFiLMLayer(in_features=d_model * 4 ,out_features=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = TaLayerNorm(SCREAMING_SNAKE_CASE__ ,eps=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = nn.Dropout(SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Tuple=None ):
SCREAMING_SNAKE_CASE:Any = self.layer_norm(SCREAMING_SNAKE_CASE__ )
if conditioning_emb is not None:
SCREAMING_SNAKE_CASE:Optional[int] = self.film(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = self.DenseReluDense(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Dict ):
super().__init__()
SCREAMING_SNAKE_CASE:Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Dict = nn.Dropout(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = NewGELUActivation()
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:int = self.act(self.wi_a(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:int = self.wi_a(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[Any] = hidden_gelu * hidden_linear
SCREAMING_SNAKE_CASE:List[str] = self.dropout(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = self.wo(SCREAMING_SNAKE_CASE__ )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int=1e-6 ):
super().__init__()
SCREAMING_SNAKE_CASE:Any = nn.Parameter(torch.ones(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:List[Any] = eps
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Any ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
SCREAMING_SNAKE_CASE:Union[str, Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
SCREAMING_SNAKE_CASE:Dict = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(SCREAMING_SNAKE_CASE__ ,3.0 )) ))
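# --- Illustrative check added by the editor: the tanh-based GELU above should agree
# with PyTorch's built-in tanh approximation to float precision.
def _demo_new_gelu_matches_torch():
    x = torch.linspace(-3.0, 3.0, steps=7)
    ours = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (x + 0.044_715 * torch.pow(x , 3.0 )) ))
    assert torch.allclose(ours , torch.nn.functional.gelu(x , approximate="tanh" ) , atol=1e-6 )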
class _snake_case ( nn.Module ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any ):
super().__init__()
SCREAMING_SNAKE_CASE:Any = nn.Linear(SCREAMING_SNAKE_CASE__ ,out_features * 2 ,bias=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ):
SCREAMING_SNAKE_CASE:Optional[Any] = self.scale_bias(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Optional[Any] = torch.chunk(SCREAMING_SNAKE_CASE__ ,2 ,-1 )
SCREAMING_SNAKE_CASE:Optional[Any] = x * (1 + scale) + shift
return x
| 139
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version :
    version_str : str
    description : Optional[str] = None
    major : Optional[Union[str, int]] = None
    minor : Optional[Union[str, int]] = None
    patch : Optional[Union[str, int]] = None
    def __post_init__( self : Dict ):
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self : Optional[Any] ):
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
    def tuple ( self : List[Any] ):
return self.major, self.minor, self.patch
    def _validate_operand ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ):
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
return Version(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
return other
raise TypeError(F'''{other} (type {type(SCREAMING_SNAKE_CASE__ )}) cannot be compared to version.''' )
def __eq__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ):
try:
SCREAMING_SNAKE_CASE:List[str] = self._validate_operand(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Tuple = self._validate_operand(SCREAMING_SNAKE_CASE__ )
return self.tuple < other.tuple
def __hash__( self : Union[str, Any] ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
    def from_dict ( cls , dic ):
        field_names = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCamelCase ( self : Tuple ):
return self.version_str
def _str_to_version_tuple ( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
    return tuple(int(v ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _version_tuple_to_str ( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
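# --- Illustrative checks added by the editor: Version orders semantic version strings
# numerically, and comparisons coerce plain strings through _validate_operand.
assert Version("1.10.0") > Version("1.2.0")  # numeric, not lexicographic, ordering
assert Version("2.0.0") == "2.0.0"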
| 139
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCamelCase_ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367
|
'''simple docstring'''
import torch
from transformers import AutoModel
class lowercase_ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Union[str, Any]="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__lowerCamelCase , self ).__init__()
_SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(__lowerCamelCase , return_dict=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.nn.CosineSimilarity(3 , 1e-08 )
_SCREAMING_SNAKE_CASE = torch.nn.Softmax(dim=1 )
def lowerCAmelCase_ ( self : Dict , **__lowerCamelCase : Any ):
"""simple docstring"""
return self.bert(**__lowerCamelCase ).last_hidden_state
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=__lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__lowerCamelCase , __lowerCamelCase ) )
def lowerCAmelCase_ ( self : int , __lowerCamelCase : str , __lowerCamelCase : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = W_supports["sizes"].tolist()
_SCREAMING_SNAKE_CASE = W_supports["start_token_id"].item()
_SCREAMING_SNAKE_CASE = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
_SCREAMING_SNAKE_CASE = self.BERT(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.BERT(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = W_supports["input_ids"] == start_token_id
_SCREAMING_SNAKE_CASE = W_supports["input_ids"] == end_token_id
for i, size in enumerate(__lowerCamelCase ):
if i == 0:
_SCREAMING_SNAKE_CASE = 0
else:
_SCREAMING_SNAKE_CASE = support_sizes[i - 1]
_SCREAMING_SNAKE_CASE = S[s : s + size][start_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE = S[s : s + size][end_token_masks[s : s + size]]
_SCREAMING_SNAKE_CASE = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
_SCREAMING_SNAKE_CASE = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
_SCREAMING_SNAKE_CASE = torch.vstack((p_starts, p_start) )
_SCREAMING_SNAKE_CASE = torch.vstack((p_ends, p_end) )
else:
_SCREAMING_SNAKE_CASE = p_start
_SCREAMING_SNAKE_CASE = p_end
return p_starts, p_ends
| 111
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 24
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
lowerCAmelCase__ = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
lowerCAmelCase__ = '''▁'''
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model )
def snake_case_ (self ) -> int:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it here and reload it in __setstate__
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
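

# Usage sketch (illustrative, not part of the original module; assumes a local
# SentencePiece model file exists at the given path):
#
#     tokenizer = BarthezTokenizer("sentencepiece.bpe.model")
#     pieces = tokenizer._tokenize("Bonjour le monde")            # subword pieces
#     ids = [tokenizer._convert_token_to_id(p) for p in pieces]   # vocabulary ids
#     ids = tokenizer.build_inputs_with_special_tokens(ids)       # adds <s> ... </s>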
"""BLIP-2 lazy import structure."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
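
# Usage sketch (illustrative, not part of the original file): with the lazy module
# in place, importing the package is cheap, and the torch-backed classes are only
# materialized on first attribute access, e.g.:
#
#     from transformers.models.blip_2 import Blip2Config  # no torch import yet
#     config = Blip2Config()                               # submodule resolved lazily here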
"""Compute the side length of the largest square of 1s in a binary matrix, four ways."""


def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Plain recursion over every cell: exponential time, no memoization.

    >>> largest_square_area_in_matrix_top_down(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: past the last row or column, no square can start here.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Recursion with memoization in `dp_array`: O(rows * cols) time.

    >>> largest_square_area_in_matrix_top_down_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Iterative bottom-up DP over a (rows + 1) x (cols + 1) table: O(rows * cols) time and space.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Bottom-up DP keeping only two rows: O(rows * cols) time, O(cols) space.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Copy rather than alias: a plain `next_row = current_row` would let this
        # row's freshly written values leak into the next iteration's reads.
        next_row = current_row.copy()

    return largest_square_area


if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
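

# Cross-check sketch (not part of the original module): all four variants should
# agree; 2 is the side length of the largest all-ones square in this sample matrix.
if __name__ == "__main__":
    sample = [[1, 1, 1], [1, 1, 1], [1, 1, 0]]
    results = {
        fn(3, 3, sample)
        for fn in (
            largest_square_area_in_matrix_top_down,
            largest_square_area_in_matrix_top_down_with_dp,
            largest_square_area_in_matrix_bottom_up,
            largest_square_area_in_matrix_bottom_up_space_optimization,
        )
    }
    assert results == {2}, results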