code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Any:
'''simple docstring'''
_UpperCamelCase = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( snake_case__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : int=13 , lowercase_ : List[str]=7 , lowercase_ : Tuple=True , lowercase_ : int=True , lowercase_ : List[str]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=99 , lowercase_ : str=32 , lowercase_ : int=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : str=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Union[str, Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Tuple=0.02 , lowercase_ : int=3 , lowercase_ : List[Any]=4 , lowercase_ : List[Any]=None , ) -> Any:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
def __UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Tuple) -> List[str]:
"""simple docstring"""
_UpperCamelCase = DistilBertModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = model(UpperCAmelCase_ , UpperCAmelCase_)
_UpperCamelCase = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
_UpperCamelCase = DistilBertForMaskedLM(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : int , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = DistilBertForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = DistilBertForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = DistilBertForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.num_choices
_UpperCamelCase = DistilBertForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs
_UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( snake_case__, snake_case__, unittest.TestCase ):
'''simple docstring'''
__A = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__A = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = True
__A = True
__A = True
__A = True
def __UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCamelCase = DistilBertModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase_ , dim=37)
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase_)
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase_)
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase_)
def __UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase_)
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase_)
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase_)
@slow
def __UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DistilBertModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@slow
@require_torch_gpu
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=UpperCAmelCase_)
_UpperCamelCase = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
_UpperCamelCase = torch.jit.trace(
UpperCAmelCase_ , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , "traced_model.pt"))
_UpperCamelCase = torch.jit.load(os.path.join(UpperCAmelCase_ , "traced_model.pt") , map_location=UpperCAmelCase_)
loaded(inputs_dict["input_ids"].to(UpperCAmelCase_) , inputs_dict["attention_mask"].to(UpperCAmelCase_))
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = DistilBertModel.from_pretrained("distilbert-base-uncased")
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)[0]
_UpperCamelCase = torch.Size((1, 11, 768))
self.assertEqual(output.shape , UpperCAmelCase_)
_UpperCamelCase = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1e-4))
| 702 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowerCAmelCase__ ( a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
_UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("RGB" )
_UpperCamelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
] )
_UpperCamelCase = transform(a__ ).unsqueeze(0 ).to(a__ )
return image
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
'''simple docstring'''
if "visual_encoder" in key:
_UpperCamelCase = re.sub("visual_encoder*" , "vision_model.encoder" , a__ )
if "blocks" in key:
_UpperCamelCase = re.sub(r"blocks" , "layers" , a__ )
if "attn" in key:
_UpperCamelCase = re.sub(r"attn" , "self_attn" , a__ )
if "norm1" in key:
_UpperCamelCase = re.sub(r"norm1" , "layer_norm1" , a__ )
if "norm2" in key:
_UpperCamelCase = re.sub(r"norm2" , "layer_norm2" , a__ )
if "encoder.norm" in key:
_UpperCamelCase = re.sub(r"encoder.norm" , "post_layernorm" , a__ )
if "encoder.patch_embed.proj" in key:
_UpperCamelCase = re.sub(r"encoder.patch_embed.proj" , "embeddings.patch_embedding" , a__ )
if "encoder.pos_embed" in key:
_UpperCamelCase = re.sub(r"encoder.pos_embed" , "embeddings.position_embedding" , a__ )
if "encoder.cls_token" in key:
_UpperCamelCase = re.sub(r"encoder.cls_token" , "embeddings.class_embedding" , a__ )
if "self_attn" in key:
_UpperCamelCase = re.sub(r"self_attn.proj" , "self_attn.projection" , a__ )
return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the three original BLIP checkpoints (captioning, VQA, ITM) to
    HuggingFace format and sanity-check each one against known outputs.

    Args:
        pytorch_dump_folder_path: where to save the converted models; the VQA
            and ITM variants get "_vqa" / "_itm" suffixes. May be None to skip saving.
        config_path: optional path to an hf config.json; a default BlipConfig
            is built otherwise.

    Fixes vs. original: every local assignment had been obfuscated to the same
    dead name `_UpperCamelCase` and the function name did not match its call
    site; both are restored from the surviving call expressions.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    # Golden outputs recorded from the original BLIP captioning model.
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    # --- image-text retrieval (ITM) model ---
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    # Golden scores recorded from the original retrieval checkpoint.
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    # Fixes vs. original: the parser/args locals were assigned to a dead name,
    # and the call passed a nonexistent `args.checkpoint_path` as a third
    # argument to a two-parameter function (the checkpoint URLs are hard-coded
    # inside convert_blip_checkpoint).
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 703 | import logging
from transformers import PretrainedConfig
# Module-level logger and checkpoint-to-config-URL map for BertAbs.
# NOTE(review): both names below were obfuscated to the same dead identifier;
# `BERTABS_FINETUNED_CONFIG_MAP` follows the upstream research-project naming —
# confirm against callers before relying on it.
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    Fixes vs. original: the base class referenced an undefined name (restored
    to `PretrainedConfig`, imported above); the constructor declared the same
    obfuscated parameter name repeatedly (a SyntaxError) and every attribute
    assignment targeted a dead local instead of `self`.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,       # size of the (BERT) vocabulary
        max_pos=512,            # maximum sequence length / positional embeddings
        enc_layers=6,           # encoder depth
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,           # decoder depth
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger (transformers' logging wrapper) and the standard
# pretrained-config URL map for Marian checkpoints.
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for a Marian NMT model (mirrors `transformers.MarianConfig`).

    Fixes vs. original: three class attributes were all obfuscated to the same
    name (so two were silently lost) — restored to the `PretrainedConfig`
    attributes the machinery reads; the constructor declared one obfuscated
    parameter name repeatedly (SyntaxError), attribute assignments targeted a
    dead local, and `super().__init__` received undefined identifiers.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Falls back to the encoder vocab when no separate decoder vocab is given.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _UpperCAmelCase(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for Marian (methods copied from the BART OnnxConfig).

    Fixes vs. original: every method had been obfuscated to the same name
    `__UpperCAmelCase` (so only the last definition survived and the
    `inputs`/`outputs` properties were lost); names are restored from the
    surviving in-class call sites (e.g.
    ``self._generate_dummy_inputs_for_encoder_and_decoder`` at its callers)
    and the undefined ``UpperCamelCase_`` identifiers are replaced by the
    locals the destroyed assignments were meant to bind.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the ONNX graph inputs for the current task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a past, the decoder consumes one new token per step.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis layout of the ONNX graph outputs (adds `present.*` with a past)."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs, plus past key/values when enabled."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers + min_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs, plus past key/values when enabled."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch sized to the effective (fixed or given) axes."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation on the configured export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # Mutates `flattened_output` in place via the appropriate base class;
        # no return value, matching the base-class contract.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-4
| 704 | from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Download the og:image of a web page to the current directory.
    # Fixes vs. original: every local was assigned to the same dead name while
    # later lines read `url`, `soup`, `image_url`, `image_data`, `file_name`.
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is nearest ``target``.

    An a x b grid contains T(a) * T(b) sub-rectangles, where T(n) is the n-th
    triangle number. For each candidate a we solve T(b) ~= target / T(a) with
    the quadratic formula and test the two integer neighbours of b.

    Fixes vs. original: the parameter had been obfuscated while the body read
    `target`, and every working variable was assigned to the same dead name;
    names are restored from the surviving annotation comments and call sites.

    >>> solution()
    2772
    """
    triangle_numbers: list[int] = [0]
    idx: int

    # 1.1 is a safety margin so the table covers every b we might index.
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
| 705 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (transformers' logging wrapper) and the standard
# pretrained-config URL map for the six published DPR checkpoints.
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Fixes vs. original: base class restored to `PretrainedConfig` (the original
    referenced an undefined name); the constructor declared one obfuscated
    parameter name repeatedly (a SyntaxError) and every attribute assignment
    targeted a dead local instead of `self`.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,  # 0 means no projection on top of the encoder
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read the boolean flag `key` from the environment.

    Returns `default` when the variable is unset; otherwise returns 1 for a
    truthy value and 0 for a falsy one ("y/yes/t/true/on/1" vs
    "n/no/f/false/off/0", case-insensitive). This is the exact
    `distutils.util.strtobool` contract, inlined because distutils is
    deprecated and removed in Python 3.12.

    Raises:
        ValueError: if the variable is set to an unrecognised value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        return default
    # KEY is set, convert it to True or False.
    lowered = value.lower()
    if lowered in ("y", "yes", "t", "true", "on", "1"):
        return 1
    if lowered in ("n", "no", "f", "false", "off", "0"):
        return 0
    # More values are supported, but let's keep the message simple.
    raise ValueError(f"If set, {key} must be yes or no.")
# Module flag read by the `slow` decorator below; name restored (the original
# assigned to a dead identifier while `slow` reads `_run_slow_tests`).
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Unconditionally skip the decorated test."""
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    """Skip the decorated test unless the RUN_SLOW environment flag is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    """Skip the decorated test unless no CUDA device is available."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    """Skip the decorated test unless a CUDA device is available."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    """Skip the decorated test unless an XPU is available."""
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    """Skip the decorated test unless torch has a working `mps` backend."""
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    """Skip the decorated test unless both transformers and datasets are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    """Skip the decorated test unless bitsandbytes is installed."""
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    """Skip the decorated test unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    """Skip the decorated test unless exactly one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    """Skip the decorated test unless exactly one XPU device is available.

    NOTE(review): `torch.xpu` only exists on XPU-enabled torch builds — the
    decorator raises AttributeError on builds without it, as in the original.
    """
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    """Skip the decorated test unless more than one CUDA device is available."""
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    """Skip the decorated test unless more than one XPU device is available."""
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    """Skip the decorated test unless safetensors is installed."""
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    """Skip the decorated test unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    """Skip the decorated test unless torch >= 1.12.0 (first FSDP-capable release).

    NOTE(review): name taken from the upstream accelerate helper with this
    exact body; no call site is visible here to confirm it.
    """
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    """Skip the decorated test unless torch >= `version`.

    Usable both as `@require_torch_min_version(version="x")` (test_case is None
    on the first call, so a partial is returned) and as a direct call.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    """Skip the decorated test unless tensorboard is installed."""
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    """Skip the decorated test unless wandb is installed."""
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    """Skip the decorated test unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
# True when wandb or tensorboard is importable AND comet_ml is not; read by
# `require_trackers` below (which references this exact name).
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    """Skip the decorated test unless at least one tracker is usable (and comet_ml absent)."""
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """TestCase backed by a shared temporary directory (`cls.tmpdir`).

    The directory is created once per class and removed on class teardown; by
    default its *contents* are wiped before every test.

    Fixes vs. original: the three unittest hooks had all been obfuscated to
    the same method name (so only `setUp` survived) and `cls.tmpdir` was never
    actually assigned; hook names restored to the unittest lifecycle API.
    """

    # When True (the default), setUp empties tmpdir before each test.
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create the shared temporary directory."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Delete the shared temporary directory, if it still exists."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Wipe the contents of tmpdir (files first, then sub-directories)."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """TestCase that resets accelerate's global state singletons after each test.

    Fixes vs. original: the obfuscated method name is restored to the unittest
    `tearDown` hook so the reset actually runs.
    """

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """TestCase helper that starts `unittest.mock` patchers and guarantees they stop."""

    def add_mocks(self, mocks):
        """Start the given patcher(s) and register each patcher's `stop` as cleanup.

        Args:
            mocks: a single mock patcher or a list/tuple of them.
        """
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Return True when every process holds an identical copy of `tensor`.

    Gathers the tensor across processes via accelerate's `gather` and compares
    each gathered slice against the local copy.
    """
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = returncode
_UpperCamelCase = stdout
_UpperCamelCase = stderr
async def lowerCAmelCase__ ( a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
while True:
_UpperCamelCase = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run `cmd` asynchronously, teeing stdout/stderr as they arrive.

    Returns a `_RunOutput` with the return code and the captured (decoded,
    rstripped) stdout/stderr lines.
    """
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    """Synchronously run `cmd` via `_stream_subprocess`, raising on failure.

    NOTE(review): function name taken from the upstream accelerate helper with
    this exact body; no call site is visible here to confirm it.

    Raises:
        RuntimeError: when the command exits with a positive return code; the
            message embeds the command line and the combined stderr.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    """Raised by `run_command` when the wrapped subprocess fails.

    Name restored from the `raise` site in `run_command`; the original base
    class referenced an undefined identifier, restored to `Exception`.
    """

    pass
def run_command(command, return_stdout=False):
    """Run `command` (a list) with stderr folded into stdout.

    Args:
        command: argv list for `subprocess.check_output`.
        return_stdout: when True, return the captured output (decoded to str
            when possible); otherwise return None on success.

    Raises:
        SubprocessCallException: if the command exits non-zero, chaining the
            original CalledProcessError and embedding its output.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 706 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Table Transformer package. Fixes vs. original:
# the structure dict and its torch-only extension were assigned to dead names
# while `_LazyModule` below reads `_import_structure`, and the lazy module was
# never installed into `sys.modules`.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch absent: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | 0 |
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place (and return it) using odd-even transposition sort.

    Alternating passes compare/swap pairs at even indices, then at odd
    indices, until a full double pass makes no swap.

    Fixes vs. original: the parameter and the `is_sorted` flag were obfuscated
    to dead names, and the tuple-swap results were discarded instead of being
    written back into the list; name restored from the call site below.

    >>> odd_even_sort([3, 1, 2])
    [1, 2, 3]
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    # inputing elements of the list in one line
    input_list = [int(x) for x in input().split()]
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 707 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of target labels for each GLUE task; "sts-b" is a regression task
# (single output), all others are classification.
lowerCamelCase__ = {
    '''cola''': 2,
    '''mnli''': 3,
    '''mrpc''': 2,
    '''sst-2''': 2,
    '''sts-b''': 1,
    '''qqp''': 2,
    '''qnli''': 2,
    '''rte''': 2,
    '''wnli''': 2,
}
# Fix: the conversion function below looks this table up as
# GLUE_TASKS_NUM_LABELS, a name the previous revision never bound (NameError).
GLUE_TASKS_NUM_LABELS = lowerCamelCase__
logging.set_verbosity_info()
def lowerCAmelCase__ ( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ) ->Optional[Any]:
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it.

    Fixes over the previous revision: the four parameters were all named
    ``a__`` (a SyntaxError), config/model assignments had lost their targets,
    and both output paths joined the dump folder with itself instead of
    WEIGHTS_NAME / CONFIG_NAME.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param bert_config_file: JSON config describing the pre-trained XLNet model.
    :param pytorch_dump_folder_path: folder receiving weights + config.
    :param finetuning_task: optional GLUE task name or a "squad" task; picks
        the task head to instantiate.
    """
    # Initialise PyTorch model configuration
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


# Name used by the __main__ block below.
convert_xlnet_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    # CLI wrapper around the checkpoint conversion above.
    # NOTE(review): `parser`, `args` and `convert_xlnet_checkpoint_to_pytorch`
    # are referenced but never bound under those names (the parser and
    # parse_args results are assigned to `lowerCamelCase__`, and the function
    # above is defined as `lowerCAmelCase__`) — obfuscation residue.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--xlnet_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained XLNet model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--finetuning_task''',
        default=None,
        type=str,
        help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
    )
    lowerCamelCase__ = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) ->Union[str, Any]:
    """Convert a TensorFlow TAPAS checkpoint to PyTorch and save model + tokenizer.

    Fixes over the previous revision: the five parameters were all named
    ``a__`` (a SyntaxError), arguments had been replaced by the undefined
    placeholder ``__A``, and the task-specific config assignments had lost
    their targets. Attribute names for the task hyper-parameters follow the
    reference TAPAS conversion script (run_task_main.py / hparam_utils.py);
    confirm against upstream if checkpoints behave unexpectedly.

    :param task: one of SQA, WTQ, WIKISQL_SUPERVISED, TABFACT, MLM,
        INTERMEDIATE_PRETRAINING.
    :param reset_position_index_per_cell: whether relative position embeddings
        are used.
    :param tf_checkpoint_path: path to the TF checkpoint (``...ckpt-?????``).
    :param tapas_config_file: config JSON of the pre-trained model.
    :param pytorch_dump_path: output folder for model and tokenizer files.
    """
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f'Task {task} not supported.')

    print(f'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files; the vocab file sits next to the checkpoint
    # (strip the 10-char "model.ckpt" suffix).
    print(f'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell)


# Name used by the __main__ block below.
convert_tf_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    # CLI wrapper around the TAPAS checkpoint conversion above.
    # NOTE(review): `parser`, `args` and `convert_tf_checkpoint_to_pytorch`
    # are referenced but never bound under those names (results are assigned
    # to `lowerCamelCase__`; the function above is `lowerCAmelCase__`) —
    # obfuscation residue.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
    )
    parser.add_argument(
        '''--reset_position_index_per_cell''',
        default=False,
        action='''store_true''',
        help='''Whether to use relative position embeddings or not. Defaults to True.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--tapas_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained TAPAS model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
| 708 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
    """Lightning wrapper mirroring the fine-tuned checkpoint's structure: a
    Longformer encoder plus a 2-logit span-prediction head. Only used so the
    Lightning checkpoint's state dict can be loaded and its weights copied out.

    Fixes over the previous revision: the attribute assignments had lost
    their ``self.`` targets and referenced the unbound name ``model``.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Not needed for weight conversion; implemented to satisfy the API.
        pass


# Name referenced by the conversion function below (obfuscation lost it).
LightningModel = _UpperCAmelCase
def lowerCAmelCase__ ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) ->str:
    """Copy the weights of a PyTorch-Lightning Longformer-QA checkpoint into a
    transformers ``LongformerForQuestionAnswering`` and save it.

    Fixes over the previous revision: the three parameters were all named
    ``a__`` (a SyntaxError) and intermediate results had lost their targets.

    :param longformer_model: model identifier of the base Longformer.
    :param longformer_question_answering_ckpt_path: Lightning ``.ckpt`` file.
    :param pytorch_dump_folder_path: output folder for the converted model.
    """
    # load base longformer model and wrap it like the training-time module
    model = LongformerModel.from_pretrained(longformer_model)
    lightning_model = _UpperCAmelCase(model)

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


# Name used by the __main__ block below.
convert_longformer_qa_checkpoint_to_pytorch = lowerCAmelCase__
if __name__ == "__main__":
    # CLI wrapper around the Longformer-QA checkpoint conversion above.
    # NOTE(review): `parser`, `args` and
    # `convert_longformer_qa_checkpoint_to_pytorch` are referenced but never
    # bound under those names (obfuscation residue).
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase__ = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
# Map of canonical BLOOM checkpoints to their hosted config files.
# NOTE(review): this assignment reuses `lowerCamelCase__` and therefore
# shadows the logger bound on the previous line — obfuscation residue.
lowerCamelCase__ = {
    'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
    'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
    'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
    'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
    'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
    'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class _UpperCAmelCase ( lowerCAmelCase ):
    """Configuration class for BLOOM models.

    Fixes over the previous revision: all ``__init__`` parameters were named
    ``lowercase_`` (a SyntaxError), the class attributes were all named
    ``__A`` (each overwriting the last), and the attribute assignments had
    lost their ``self.`` targets. Parameter names are restored in the order
    of the original default values visible in the damaged signature.
    """

    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    # Standard PretrainedConfig aliases for the BLOOM-specific attr names.
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class _UpperCAmelCase ( lowerCAmelCase ):
    """ONNX export configuration for BLOOM, with past-key-values support.

    Fixes over the previous revision: method parameters were all named
    ``lowercase_`` (a SyntaxError), arguments had been replaced by the
    undefined placeholder ``_lowerCAmelCase``, and several assignments had
    lost their targets. Restored against the OnnxConfigWithPast API.
    """

    # Minimum torch release able to export this architecture.
    torch_onnx_minimum_version = version.parse('''1.12''')

    def __init__(
        self,
        config: "PretrainedConfig",
        task: str = "default",
        patching_specs: Optional[List[PatchingSpec]] = None,
        use_past: bool = False,
    ):
        """Delegate to OnnxConfigWithPast and ensure a pad token id exists."""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes description of the graph inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        # Numerical tolerance when validating the exported model.
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (optionally with past_key_values) for export."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the fabricated past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 709 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility; emits a
    FutureWarning and otherwise behaves like the image processor base.

    Fixes over the previous revision: ``*lowercase_ , **lowercase_`` reused
    one parameter name (a SyntaxError), and the warning category argument
    had been replaced by that parameter instead of FutureWarning.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 82 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import structure handed to _LazyModule: submodule name -> public symbols.
# Fixes over the previous revision: the modeling list clobbered this dict
# instead of being added under its own key, the final _LazyModule call
# referenced an unbound `_import_structure`, its result was never installed
# in sys.modules, and the TYPE_CHECKING imports used garbled "swinva" names
# inconsistent with the symbol strings declared here.
lowerCamelCase__ = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    lowerCamelCase__["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCamelCase__, module_spec=__spec__)
| 710 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
    """Output modes for text2text pipelines: raw token ids or decoded text.

    Fixes over the previous revision: both members were named ``__A``,
    which enum rejects (and which matched nothing); the pipeline code below
    selects ``ReturnType.TENSORS`` / ``ReturnType.TEXT``.
    """

    TENSORS = 0
    TEXT = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Base pipeline for text-to-text (seq2seq) generation.

    NOTE(review): this class carries obfuscation damage left as-is —
    several defs reuse the parameter name ``lowercase_`` (a SyntaxError)
    and many assignments collapsed onto ``_UpperCamelCase``, so names
    referenced below (``truncation``, ``generate_kwargs``, ``return_type``,
    ``stop_sequence_ids``, ``inputs``, ``in_b``/``out_b``, ``records`` …)
    are never bound. Confirm each against the upstream
    Text2TextGenerationPipeline before relying on the descriptions here.
    """

    # Key prefix of the output dicts ("generated_text"/"generated_token_ids").
    __A = '''generated'''

    def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
        """Construct the base Pipeline, then restrict to seq2seq model classes."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        _UpperCamelCase = {}
        if truncation is not None:
            _UpperCamelCase = truncation
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        # Map the boolean `return_tensors` convenience flag onto ReturnType.
        if return_tensors is not None and return_type is None:
            _UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            # Only the first token of a multi-token stop sequence is honoured.
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
        """Hook for subclasses to sanity-check (input, min, max) lengths; default accepts."""
        return True

    def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
        """Prefix the input(s) with the model prefix and tokenize to framework tensors."""
        _UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , lowercase_):
            # Batch input: padding requires a pad token.
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            _UpperCamelCase = ([prefix + arg for arg in args[0]],)
            _UpperCamelCase = True
        elif isinstance(args[0] , lowercase_):
            _UpperCamelCase = (prefix + args[0],)
            _UpperCamelCase = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        _UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
        """Run the pipeline; unwraps single-result lists for batched string input."""
        _UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
        if (
            isinstance(args[0] , lowercase_)
            and all(isinstance(lowercase_ , lowercase_) for el in args[0])
            and all(len(lowercase_) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
        """Preprocess step: tokenize with the requested truncation strategy."""
        _UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
        return inputs

    def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
        """Forward step: generate, then reshape ids to (batch, candidates, seq)."""
        if self.framework == "pt":
            _UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            _UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
        # Validate requested lengths against the input before generating.
        _UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        _UpperCamelCase = output_ids.shape[0]
        if self.framework == "pt":
            _UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
        """Postprocess step: emit token ids or decoded text per candidate."""
        _UpperCamelCase = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                _UpperCamelCase = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: output dicts are keyed ``summary_text`` /
    ``summary_token_ids`` and suspicious length settings only warn.

    Fixes over the previous revision: ``check_inputs`` reused the parameter
    name ``lowercase_`` three times (a SyntaxError), ``__call__`` reused it
    for ``*args``/``**kwargs``, and the class attribute consumed via
    ``self.return_name`` in the base class was bound as ``__A``.
    """

    return_name = '''summary'''

    def __call__(self, *args, **kwargs):
        """Delegate to the base text2text ``__call__``."""
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn about inconsistent length settings; never blocks generation."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')

        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: output dicts are keyed ``translation_text`` /
    ``translation_token_ids``; supports ``src_lang``/``tgt_lang`` either as
    direct arguments or parsed from a ``translation_XX_to_YY`` task name.

    Fixes over the previous revision: duplicated ``lowercase_`` parameter
    names (a SyntaxError) and tuple-unpack / dict-entry assignments that had
    lost their targets.
    """

    return_name = '''translation'''

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn when the input nearly fills ``max_length``; always accepts."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """Tokenize, preferring the tokenizer's translation-aware builder when present."""
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """Route language arguments into the preprocess parameters."""
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        """Delegate to the base text2text ``__call__``."""
        return super().__call__(*args, **kwargs)
| 82 | 0 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase__ = get_logger(__name__)
class _UpperCAmelCase :
    """Resolves a cache location for an archive and extracts it on demand.

    Fixes over the previous revision: arguments had been replaced by the
    undefined placeholder ``__A`` and attribute assignments had lost their
    ``self.`` targets.
    """

    def __init__(self, cache_dir: Optional[str] = None):
        # Extracted archives live under <cache_dir>/extracted, or the
        # package-wide default location when no cache dir is given.
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when nothing usable exists at the target
        # (no file, and no non-empty directory).
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract ``input_path`` if its format is recognized; return the
        extraction path, or ``input_path`` unchanged when it is not an archive."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class _UpperCAmelCase ( lowerCAmelCase ):
    """Abstract interface every archive extractor implements.

    Fixes over the previous revision: ``is_extractable`` reused the name
    ``lowercase_`` for both the path and ``**kwargs`` (a SyntaxError), and
    both methods were named ``__UpperCAmelCase`` although callers in this
    file invoke them as ``is_extractable`` / ``extract``.
    """

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """Return True when ``path`` looks like an archive this extractor handles."""
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """Extract ``input_path`` into ``output_path``."""
        ...
class _UpperCAmelCase ( lowerCAmelCase, lowerCAmelCase ):
    """Extractor base that recognizes archives by their leading magic bytes.

    Fixes over the previous revision: both methods were named
    ``__UpperCAmelCase`` (the second overwrote the first; L2201 in this file
    calls ``read_magic_number``), the prefix list was bound as ``__A``
    although the body reads ``cls.magic_numbers``, arguments were the
    undefined ``__A`` placeholder, and ``is_extractable`` duplicated the
    ``lowercase_`` parameter name.
    NOTE(review): the class statement lists the same (undefined) base twice
    — kept byte-identical; confirm the intended bases upstream.
    """

    # Candidate magic-number prefixes; concrete subclasses override this.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        """Read the first ``magic_number_length`` bytes of ``path``."""
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        """Match ``magic_number`` (read from ``path`` when absent) against the prefixes."""
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Tar archive extractor with path-traversal and link-escape filtering.

    Fixes over the previous revision: all three methods shared the name
    ``__UpperCAmelCase`` (each definition overwrote the last; ``extract``
    calls ``safemembers`` by name), arguments were the undefined ``__A``
    placeholder, and ``extract`` referenced the unbound name ``TarExtractor``.
    """

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only members that extract strictly inside ``output_path``
        (guards against path traversal and symlink/hardlink escapes)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}')
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=_UpperCAmelCase.safemembers(tar_file, output_path))
        tar_file.close()
class _UpperCAmelCase ( lowerCAmelCase ):
    """Gzip single-file extractor.

    Fixes over the previous revision: the prefix list was bound as ``__A``
    (the base class reads ``magic_numbers``), and ``extract`` duplicated the
    ``lowercase_`` parameter name (a SyntaxError) while referencing ``__A``.
    """

    magic_numbers = [B'''\x1F\x8B''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zip archive extractor with a stricter detection than ``zipfile.is_zipfile``.

    Fixes over the previous revision: duplicated ``lowercase_`` parameter
    names (a SyntaxError) and arguments/assignment targets collapsed onto
    the ``__A``/``_UpperCamelCase`` placeholders.
    """

    magic_numbers = [
        B'''PK\x03\x04''',
        B'''PK\x05\x06''',  # empty archive
        B'''PK\x07\x08''',  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class _UpperCAmelCase ( lowerCAmelCase ):
    """XZ (LZMA) single-file extractor.

    Fixes over the previous revision: ``__A``→``magic_numbers`` (read by the
    base class) and duplicated ``lowercase_`` parameter names (a SyntaxError).
    """

    magic_numbers = [B'''\xFD\x37\x7A\x58\x5A\x00''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class _UpperCAmelCase ( lowerCAmelCase ):
    """RAR archive extractor (requires the optional ``rarfile`` package).

    Fixes over the previous revision: ``__A``→``magic_numbers``, duplicated
    ``lowercase_`` parameter names (a SyntaxError) and ``__A`` placeholders.
    """

    magic_numbers = [B'''Rar!\x1a\x07\x00''', B'''Rar!\x1a\x07\x01\x00''']  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zstandard single-file extractor (requires the optional ``zstandard`` package).

    Fixes over the previous revision: ``__A``→``magic_numbers``, duplicated
    ``lowercase_`` parameter names (a SyntaxError) and ``__A`` placeholders.
    """

    magic_numbers = [B'''\x28\xb5\x2F\xFD''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Bzip2 single-file extractor.

    Fixes over the previous revision: ``__A``→``magic_numbers``, duplicated
    ``lowercase_`` parameter names (a SyntaxError), and the non-existent
    module name ``bza`` (the file-level ``import bza`` is a garbled
    ``bz2``) — the stdlib module is imported locally here instead.
    """

    magic_numbers = [B'''\x42\x5A\x68''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        import bz2  # stdlib; avoids the broken file-level `import bza`

        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class _UpperCAmelCase ( lowerCAmelCase ):
    """7z archive extractor (requires the optional ``py7zr`` package).

    Fixes over the previous revision: ``__A``→``magic_numbers``, duplicated
    ``lowercase_`` parameter names (a SyntaxError), ``__A`` placeholders,
    and the garbled module name ``pyazr`` (the guard checks
    ``config.PY7ZR_AVAILABLE``, so the intended module is ``py7zr``).
    """

    magic_numbers = [B'''\x37\x7A\xBC\xAF\x27\x1C''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class _UpperCAmelCase ( lowerCAmelCase ):
    """LZ4 frame single-file extractor (requires the optional ``lz4`` package).

    Fixes over the previous revision: ``__A``→``magic_numbers``, duplicated
    ``lowercase_`` parameter names (a SyntaxError), ``__A`` placeholders,
    and the garbled module name ``lza.frame`` (the guard checks
    ``config.LZ4_AVAILABLE``, so the intended module is ``lz4.frame``).
    """

    magic_numbers = [B'''\x04\x22\x4D\x18''']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class _UpperCAmelCase :
    """Facade mapping archive-format names to their extractor classes.

    NOTE(review): method bodies below reference the undefined placeholder
    ``__A`` where real argument names once stood, and several signatures
    repeat the parameter name ``lowercase_`` (a SyntaxError as written) —
    obfuscation damage; verify against the upstream module before executing.
    """

    # Format name -> extractor class; probing order follows insertion order.
    __A = {
        '''tar''': TarExtractor,
        '''gzip''': GzipExtractor,
        '''zip''': ZipExtractor,
        '''xz''': XzExtractor,
        '''rar''': RarExtractor,
        '''zstd''': ZstdExtractor,
        '''bz2''': BzipaExtractor,
        '''7z''': SevenZipExtractor,  # <Added version="2.4.0"/>
        '''lz4''': LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def __UpperCAmelCase ( cls : Optional[Any]) -> int:
        """Return the longest magic-number length among the registered extractors."""
        return max(
            len(__A)
            for extractor in cls.extractors.values()
            if issubclass(__A , __A)
            for extractor_magic_number in extractor.magic_numbers)

    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Union[Path, str] , lowercase_ : int) -> Any:
        """Read a file's leading bytes (its magic number); ``b""`` if unreadable."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(__A , magic_number_length=__A)
        except OSError:
            # Missing/unreadable file: treat as "no recognizable magic number".
            return b""

    @classmethod
    def __UpperCAmelCase ( cls : List[str] , lowercase_ : Union[Path, str] , lowercase_ : bool = False) -> Union[str, Any]:
        """Deprecated probe: report whether any registered extractor recognizes the file."""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead." , category=__A , )
        _UpperCamelCase = cls.infer_extractor_format(__A)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def __UpperCAmelCase ( cls : Optional[int] , lowercase_ : Union[Path, str]) -> int:  # <Added version="2.4.0"/>
        """Return the format name of the first extractor whose magic number matches."""
        _UpperCamelCase = cls._get_magic_number_max_length()
        _UpperCamelCase = cls._read_magic_number(__A , __A)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(__A , magic_number=__A):
                return extractor_format

    @classmethod
    def __UpperCAmelCase ( cls : List[Any] , lowercase_ : Union[Path, str] , lowercase_ : Union[Path, str] , lowercase_ : Optional[str] = None , lowercase_ : Optional[BaseExtractor] = "deprecated" , ) -> Optional[int]:
        """Extract an archive, preferring an explicit format; falls back to probing.

        A per-target ``.lock`` file serializes concurrent extractions of the
        same output path, and any stale previous output is removed first.
        """
        os.makedirs(os.path.dirname(__A) , exist_ok=__A)
        # Prevent parallel extractions
        _UpperCamelCase = str(Path(__A).with_suffix(".lock"))
        with FileLock(__A):
            shutil.rmtree(__A , ignore_errors=__A)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(__A , __A):  # passed as positional arg
                    # Legacy 'extractor' argument still honored, with a deprecation warning.
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead." , category=__A , )
                    _UpperCamelCase = extractor if extractor != "deprecated" else extractor_format
                else:
                    _UpperCamelCase = cls.extractors[extractor_format]
                return extractor.extract(__A , __A)
            else:
                # No format given: warn (soon required) and probe every extractor.
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0." , category=__A , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(__A):
                        return extractor.extract(__A , __A)
| 711 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)

# NOTE(review): the following module constants were all obfuscated to the same
# name ``lowerCamelCase__`` (each assignment shadows the previous); presumably
# they were distinct constants (vocab-file names, pretrained vocab map,
# positional-embedding sizes, the SentencePiece underline) — verify upstream.

# Expected SentencePiece model filename inside a saved tokenizer directory.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}

# Remote vocab files for the canonical T5 checkpoints.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}

# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}

# SentencePiece's word-boundary meta symbol.
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    """T5 tokenizer backed by a SentencePiece model, with ``<extra_id_N>``
    sentinel tokens mapped to the top of the vocabulary.

    NOTE(review): assignment targets throughout were obfuscated to the single
    name ``_UpperCamelCase`` (presumably distinct attributes/locals such as
    ``self.vocab_file`` originally), and several signatures repeat the
    parameter name ``lowercase_`` (a SyntaxError as written) — verify against
    the upstream module before executing.
    """

    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']

    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register ``extra_ids`` sentinel tokens.

        Raises:
            ValueError: if ``additional_special_tokens`` is provided but does
                not contain exactly ``extra_ids`` tokens mentioning "extra_id".
        """
        if extra_ids > 0 and additional_special_tokens is None:
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            # One-time warning about the pre-PR-24565 handling of text that
            # follows special tokens.
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)

    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Resolve the effective ``model_max_length``, warning about the legacy
        per-checkpoint defaults that will change in Transformers v5."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length

    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Total vocabulary size: SentencePiece pieces plus the extra-id sentinels."""
        return self.sp_model.get_piece_size() + self._extra_ids

    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id mapping, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Mask marking special tokens: a trailing 1 (EOS) after each sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]

    def __UpperCAmelCase ( self : str) -> Dict:
        """Return the registered ``<extra_id_N>`` sentinel token strings."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))

    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Return the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append EOS unless it is already present (warns instead of duplicating)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Token-type ids: T5 does not use them, so every position is 0."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model inputs: ``A </s>`` for one sequence, ``A </s> B </s>`` for a pair."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a

    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state

    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore state and re-load the SentencePiece model from ``vocab_file``."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Tokenize; in non-legacy mode prefix the SentencePiece word marker first."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
        return super().tokenize(lowercase_ , **lowercase_)

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Run SentencePiece; in non-legacy mode strip the artificial leading marker
        that ``tokenize`` prepended above."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            # Remove the marker from the first sub-token (or drop it entirely).
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Token -> id; ``<extra_id_N>`` sentinels occupy the highest ids, reversed."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Id -> token; ids beyond the SentencePiece range are sentinel tokens."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token

    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Join tokens back into a string, decoding runs of ordinary pieces and
        splicing special tokens through untouched."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()

    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Save the SentencePiece model into ``save_directory``: copy the source
        file when it exists on disk, otherwise serialize the in-memory model."""
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _UpperCAmelCase ( UpperCamelCase_ ):
    """Configuration for a Decision Transformer (GPT-2-style backbone plus
    state/action dimensions).

    NOTE(review): every assignment in ``__init__`` targets the obfuscated
    placeholder ``_UpperCamelCase`` (presumably ``self.<param> = <param>``
    originally), and ``super().__init__`` passes the undefined name ``_a`` —
    verify against the upstream config class before use.
    """

    __A = """decision_transformer"""
    __A = ["""past_key_values"""]
    # GPT-2-style attribute-name aliases.
    __A = {
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self : Optional[int] , lowercase_ : Optional[Any]=17 , lowercase_ : int=4 , lowercase_ : Optional[int]=128 , lowercase_ : Optional[int]=4096 , lowercase_ : str=True , lowercase_ : Optional[int]=1 , lowercase_ : List[str]=1024 , lowercase_ : Any=3 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=None , lowercase_ : List[Any]="relu" , lowercase_ : List[str]=0.1 , lowercase_ : int=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=1e-5 , lowercase_ : Optional[int]=0.02 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : int=50256 , lowercase_ : Optional[Any]=50256 , lowercase_ : str=False , lowercase_ : Any=False , **lowercase_ : Optional[int] , ) -> List[str]:
        """Record state/action dimensions and transformer hyper-parameters, then
        defer token-id handling to the parent config."""
        _UpperCamelCase = state_dim
        _UpperCamelCase = act_dim
        _UpperCamelCase = hidden_size
        _UpperCamelCase = max_ep_len
        _UpperCamelCase = action_tanh
        _UpperCamelCase = vocab_size
        _UpperCamelCase = n_positions
        _UpperCamelCase = n_layer
        _UpperCamelCase = n_head
        _UpperCamelCase = n_inner
        _UpperCamelCase = activation_function
        _UpperCamelCase = resid_pdrop
        _UpperCamelCase = embd_pdrop
        _UpperCamelCase = attn_pdrop
        _UpperCamelCase = layer_norm_epsilon
        _UpperCamelCase = initializer_range
        _UpperCamelCase = scale_attn_weights
        _UpperCamelCase = use_cache
        _UpperCamelCase = scale_attn_by_inverse_layer_idx
        _UpperCamelCase = reorder_and_upcast_attn
        _UpperCamelCase = bos_token_id
        _UpperCamelCase = eos_token_id
        super().__init__(bos_token_id=_a , eos_token_id=_a , **_a)
| 712 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) -> tuple:
    """Build a deferred "get" operation: ``(getitem, key)``.

    Bug fix: the original returned the undefined name ``k`` instead of the
    parameter ``a__``.
    """
    return getitem, a__
def lowerCAmelCase__ ( a__ , v__ ) -> tuple:
    """Build a deferred "set" operation: ``(setitem, key, value)``.

    Bug fix: the original declared the same parameter name twice (a
    SyntaxError) and returned the undefined names ``k`` and ``v``.
    """
    return setitem, a__, v__
def lowerCAmelCase__ ( a__ ) -> tuple:
    """Build a deferred "delete" operation: ``(delitem, key)``.

    Bug fix: the original returned the undefined name ``k`` instead of the
    parameter ``a__``.
    """
    return delitem, a__
def lowerCAmelCase__ ( a__ , fun , *args ) -> tuple:
    """Apply ``fun(a__, *args)`` and capture the outcome.

    Returns ``(result, None)`` on success or ``(None, exception)`` on failure.
    The broad ``except Exception`` is deliberate: this is a differential test
    harness that compares whatever one mapping raises against the other.

    Bug fix: the original declared the same parameter name three times (a
    SyntaxError) while its body already read ``fun`` and ``a__``.
    """
    try:
        return fun(a__ , *args), None
    except Exception as e:
        return None, e
# Scripted operation sequences for the parametrized differential test below.
# NOTE(review): these call ``_set``/``_get``/``_del``, but the builder
# functions above were all renamed to ``lowerCAmelCase__`` by the obfuscation;
# presumably the original module defined them under these names — verify.

# Two distinct inserts.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
# The same key written twice: last write must win.
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
# Interleaved inserts and deletes, including re-insert after delete.
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
# Reads/deletes of absent keys (exercises the error paths).
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
# Grow past the resize threshold, delete everything back down, then reuse a key.
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    """Differential test: replay the operation script ``a__`` against both a
    ``HashMap`` and a builtin ``dict`` and require identical results, string
    forms, key sets, lengths, and items.

    NOTE(review): the two unpacking lines below bind both tuple elements to the
    same obfuscated name ``_UpperCamelCase`` and the assertions then compare
    the undefined names ``my_res``/``py_res``/``my``/``py`` — presumably
    distinct locals in the original; the parametrize ids also reference the
    constants obfuscated above. Verify against the upstream test module.
    """
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) -> None:
    """Assert that ``HashMap`` exposes a strict subset of ``dict``'s public names.

    Bug fix: the nested predicate's parameter was declared as ``a__`` while its
    body read the undefined name ``name``, and the set comprehensions called
    ``is_public(a__)`` with an undefined name as well.
    """
    def is_public(name: str) -> bool:
        # Public = not underscore-prefixed.
        return not name.startswith("_" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # Strict superset: dict offers everything HashMap does, and more.
    assert dict_public_names > hash_public_names
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Fast tests for ``KandinskyVaaControlnetImgaImgPipeline`` built from tiny
    dummy models, compared against hard-coded golden pixel values.

    NOTE(review): assignment targets were obfuscated to ``_UpperCamelCase``
    (presumably distinct locals in the original test), and several property
    names are lost — verify against the upstream test module before running.
    """

    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False

    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Tiny test-model size constant (32)."""
        return 32

    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Tiny test-model size constant (32)."""
        return 32

    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Alias of ``self.time_input_dim``."""
        return self.time_input_dim

    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Four times ``self.time_input_dim``."""
        return self.time_input_dim * 4

    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Tiny cross-attention-sized constant (100)."""
        return 100

    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Build the tiny dummy conditional UNet (seeded for determinism)."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model

    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Keyword arguments for the tiny dummy VQ (movq) model."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Build the tiny dummy VQ model (seeded for determinism)."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model

    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble pipeline components: dummy UNet, a DDIM scheduler, dummy movq."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Build a deterministic input dict (embeds, init image, hint, generator)."""
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            # Device-local generators are not available for MPS; seed globally.
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def __UpperCAmelCase ( self : Any) -> str:
        """Run the pipeline on CPU (dict and tuple return paths) and compare an
        output slice against golden pixel values within 1e-2."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: full Kandinsky 2.2 prior + controlnet img2img run
    compared against a reference output image (needs network access and CUDA).

    NOTE(review): assignment targets were obfuscated to ``_UpperCamelCase`` and
    call arguments to ``lowercase_`` — verify against the upstream test.
    """

    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """End-to-end run: prior embeddings -> controlnet img2img; compare the
        512x512 result against the stored golden image."""
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # uint8 HWC image -> float tensor in [0, 1], then 1xCxHxW.
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        # CPU generator keeps the run reproducible across GPU models.
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def lowerCAmelCase__ ( a__ ) -> int:
    """Return the number of distinct ways to climb a staircase of ``a__`` steps
    taking one or two steps at a time (the Fibonacci recurrence).

    Raises:
        AssertionError: if ``a__`` is not a positive ``int``.

    Bug fixes: the original validated ``isinstance(a__, a__)`` (the value was
    passed as the class argument) and read the undefined names
    ``number_of_steps``/``current``/``previous`` instead of proper locals.
    """
    assert (
        isinstance(a__ , int ) and a__ > 0
    ), f'number_of_steps needs to be positive integer, your input {a__}'
    if a__ == 1:
        return 1
    previous, current = 1, 1
    for _ in range(a__ - 1 ):
        # Standard Fibonacci step: ways(n) = ways(n-1) + ways(n-2).
        previous, current = current, current + previous
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
def lowerCAmelCase__ ( text , n=100 , character=" " ) -> list:
    """Split ``text`` into chunks of at most ``n`` words.

    Words are delimited by ``character`` (default: a single space); each chunk
    is re-joined with ``character`` and stripped. An empty ``text`` yields
    ``[""]`` (``str.split`` on a delimiter returns one empty field).

    Bug fix: the original signature declared the same parameter name three
    times (a SyntaxError) while its body already read
    ``text``/``n``/``character``, and the split word list was bound to an
    obfuscated throwaway name.
    """
    words = text.split(character )
    return [character.join(words[i : i + n] ).strip() for i in range(0 , len(words ) , n )]
def lowerCAmelCase__ ( a__ ) ->int:
    """Flatten documents into aligned ``title``/``text`` passage lists.

    NOTE(review): the body reads ``documents``/``titles``/``texts``/
    ``split_text`` and ``__A``, none of which are bound here (the parameter is
    ``a__`` and the tuple target collapsed to the single name
    ``_UpperCamelCase``) — obfuscation damage; verify against the original
    script before running.
    """
    _UpperCamelCase = [], []
    for title, text in zip(documents["title"] , documents["text"] ):
        if text is not None:
            # Keep only non-null texts; a missing title becomes the empty string.
            for passage in split_text(__A ):
                titles.append(title if title is not None else "" )
                texts.append(__A )
    return {"title": titles, "text": texts}
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Optional[int]:
    """Compute DPR embeddings for a batch of documents and return them under the
    ``"embeddings"`` key as a NumPy array.

    NOTE(review): the duplicated parameter names make this a SyntaxError as
    written; the body reads ``ctx_tokenizer``/``ctx_encoder``/``documents``
    and ``input_ids``/``embeddings``/``__A``, so the original presumably took
    ``(documents, ctx_encoder, ctx_tokenizer)`` and bound distinct locals —
    verify against the upstream script.
    """
    # Tokenize title/text pairs, run the context encoder on ``device``, and
    # detach the pooled output back to CPU NumPy.
    _UpperCamelCase = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=__A , padding="longest" , return_tensors="pt" )["""input_ids"""]
    _UpperCamelCase = ctx_encoder(input_ids.to(device=__A ) , return_dict=__A ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__(rag_example_args, processing_args, index_hnsw_args) -> None:
    """Build a RAG knowledge dataset from a tab-separated csv of documents:
    split it into 100-word passages, embed them with a DPR context encoder,
    save the dataset, and build + save a faiss HNSW index over the embeddings.

    NOTE(review): this file is machine-mangled.  The duplicate ``a__``
    parameters (a SyntaxError) are restored to the names the body already
    uses; the remaining ``__A`` tokens are unresolved placeholders for other
    mangled module-level names and are flagged inline.
    """
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    # TODO(review): ``__A`` was the mangled passage-splitting function
    # (``split_documents`` upstream) — restore before running.
    dataset = dataset.map(__A, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    # TODO(review): ``__A`` was the mangled module-level ``device`` selector.
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=__A)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    # TODO(review): the first ``__A`` below was the mangled ``embed`` function.
    dataset = dataset.map(
        partial(__A, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class _UpperCAmelCase :
    '''Command-line arguments for the RAG "use your own knowledge dataset"
    example: csv path, question, RAG model name, DPR context-encoder model
    name, and output directory.

    NOTE(review): this file is machine-mangled — every field below was renamed
    to ``__A`` (so later assignments overwrite earlier ones) and
    ``__lowerCAmelCase`` is a placeholder (presumably ``__file__`` for the
    path defaults and ``None``/a default question string for the second field
    — confirm against the upstream script before running).
    '''

    # csv_path upstream — TODO confirm after demangling.
    __A = field(
        default=str(Path(__lowerCAmelCase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ), metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''}, )
    # question upstream.
    __A = field(
        default=__lowerCAmelCase, metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''}, )
    # rag_model_name upstream.
    __A = field(
        default='''facebook/rag-sequence-nq''', metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''}, )
    # dpr_ctx_encoder_model_name upstream.
    __A = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''', metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        }, )
    # output_dir upstream.
    __A = field(
        default=str(Path(__lowerCAmelCase ).parent / '''test_run''' / '''dummy-kb''' ), metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''}, )
@dataclass
class _UpperCAmelCase :
    '''Processing arguments for the RAG knowledge-dataset example: number of
    worker processes for passage splitting and the embedding batch size.

    NOTE(review): machine-mangled — both fields were renamed to ``__A`` (the
    second overwrites the first; upstream they are ``num_proc`` and
    ``batch_size``) and ``__lowerCAmelCase`` presumably stands for ``None``.
    '''

    # num_proc upstream — TODO confirm after demangling.
    __A = field(
        default=__lowerCAmelCase, metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        }, )
    # batch_size upstream.
    __A = field(
        default=16, metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        }, )
@dataclass
class _UpperCAmelCase :
    '''Faiss HNSW index arguments for the RAG knowledge-dataset example:
    embedding dimension and number of bi-directional links per element.

    NOTE(review): machine-mangled — both fields were renamed to ``__A`` (the
    second overwrites the first; upstream they are ``d`` and ``m``).
    '''

    # d (embedding dimension) upstream — TODO confirm after demangling.
    __A = field(
        default=768, metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''}, )
    # m (HNSW links per element) upstream.
    __A = field(
        default=128, metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        }, )
if __name__ == "__main__":
    # Configure logging: warnings globally, info for this script's logger.
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    # NOTE(review): machine-mangled — the three dataclass names below were
    # renamed to ``_UpperCAmelCase`` above, every binding here lands in the
    # same ``lowerCamelCase__`` name, and the parse result should be unpacked
    # into (rag_example_args, processing_args, index_hnsw_args); as written,
    # ``parser``, ``rag_example_args`` etc. are undefined.  Restore the
    # original names before running.
    lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    lowerCamelCase__ = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a throwaway directory when no output dir was given.
        lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 715 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zero-shot object-detection pipeline: scores an image against free-text
    candidate labels and returns scored bounding boxes.

    NOTE(review): this file is machine-mangled.  The decorator argument and
    base class (``lowerCAmelCase``) are placeholders (``PIPELINE_INIT_ARGS`` /
    ``ChunkPipeline`` upstream).  The original had duplicate ``lowercase_``
    parameters (SyntaxErrors) and every hook method renamed to
    ``__UpperCAmelCase`` so later definitions clobbered earlier ones; the
    chunk-pipeline hook names (``_sanitize_parameters``, ``preprocess``,
    ``_forward``, ``postprocess``, ``_get_bounding_box``) and parameter names
    are restored here from the bodies' own usage.
    """

    def __init__(self, **kwargs):
        """Initialize the base pipeline; PyTorch + vision backends only."""
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, "vision")
        # Restrict to models registered for zero-shot object detection
        # (the original passed its own kwargs dict here — mangled).
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """Detect objects described by ``candidate_labels`` in ``image``.

        ``image`` may be a single image (path/URL/PIL) with labels given
        separately, or a list of ``{"image": ..., "candidate_labels": ...}``
        dicts.  Returns ``{"score", "label", "box"}`` predictions.
        """
        # Legacy alias kept for backward compatibility.
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """Route ``threshold``/``top_k`` kwargs to the postprocess step."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one tokenized (label, image) pair per candidate label."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        # NOTE(review): dtype was mangled to ``torch.intaa``; upstream uses an
        # integer dtype here — confirm the exact width against the
        # transformers source.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """Run the model on one (label, image) pair, carrying bookkeeping keys through."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert per-label model outputs into sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            wrapped = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=wrapped, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                results.append({"score": score, "label": label, "box": box})
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an ``(xmin, ymin, xmax, ymax)`` tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCAmelCase :
    '''Config/input builder for the Timesformer test suite: stores model
    hyper-parameters and creates configs, pixel-value tensors, and labels.

    NOTE(review): this file is machine-mangled — the ``__init__`` signature
    repeats the parameter name ``lowercase_`` (a SyntaxError), every method is
    named ``__UpperCAmelCase`` (so later definitions overwrite earlier ones),
    every local/attribute binding collapsed to ``_UpperCamelCase`` (upstream
    these were ``self.<name> = <name>`` assignments), and ``_A`` stands in for
    demangled locals (``config``, ``pixel_values``, ``labels``, ...).  The
    original names must be restored before this class is usable.
    '''

    def __init__( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]=13 , lowercase_ : Any=10 , lowercase_ : int=3 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=2 , lowercase_ : Dict=True , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Dict=4 , lowercase_ : Union[str, Any]=37 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=10 , lowercase_ : int=0.02 , lowercase_ : List[str]="divided_space_time" , lowercase_ : Optional[Any]=None , ) -> List[Any]:
        """Store test hyper-parameters and derive patch/sequence sizes."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = image_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = patch_size
        _UpperCamelCase = num_frames
        _UpperCamelCase = is_training
        _UpperCamelCase = use_labels
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = attention_type
        _UpperCamelCase = initializer_range
        _UpperCamelCase = scope
        _UpperCamelCase = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        _UpperCamelCase = (image_size // patch_size) ** 2
        _UpperCamelCase = (num_frames) * self.num_patches_per_frame + 1

    def __UpperCAmelCase ( self : str) -> Union[str, Any]:
        """Build (config, pixel_values, labels) for a test run."""
        _UpperCamelCase = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels)
        _UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def __UpperCAmelCase ( self : Dict) -> str:
        """Build a TimesformerConfig from the stored hyper-parameters."""
        _UpperCamelCase = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        _UpperCamelCase = self.num_labels
        return config

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the base model and check the last hidden state shape."""
        _UpperCamelCase = TimesformerModel(config=_A)
        model.to(_A)
        model.eval()
        _UpperCamelCase = model(_A)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __UpperCAmelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Any) -> Any:
        """Run the video-classification head and check the logits shape."""
        _UpperCamelCase = TimesformerForVideoClassification(_A)
        model.to(_A)
        model.eval()
        _UpperCamelCase = model(_A)
        # verify the logits shape
        _UpperCamelCase = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape , _A)

    def __UpperCAmelCase ( self : int) -> Tuple:
        """Return (config, inputs_dict) for the common model tests."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        # NOTE(review): this should unpack into (config, pixel_values, labels).
        _UpperCamelCase = config_and_inputs
        _UpperCamelCase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a__, a__, unittest.TestCase ):
    '''Model test-suite for Timesformer (common model tests + pipeline tests).

    NOTE(review): this file is machine-mangled — the mixin bases were renamed
    to ``a__`` (upstream: ModelTesterMixin, PipelineTesterMixin), class
    attributes collapsed to ``__A`` (upstream: all_model_classes,
    pipeline_model_mapping, test flags — later assignments overwrite earlier
    ones), every test method is named ``__UpperCAmelCase``, locals collapsed
    to ``_UpperCamelCase``, and ``_A`` stands in for demangled names.  The
    original names must be restored before this suite is runnable.
    '''

    __A = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    __A = (
        {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    __A = False
    __A = False
    __A = False
    __A = False

    def __UpperCAmelCase ( self : Any) -> str:
        """Set up the model tester and the config tester."""
        _UpperCamelCase = TimesformerModelTester(self)
        _UpperCamelCase = ConfigTester(
            self , config_class=_A , has_text_modality=_A , hidden_size=37)

    def __UpperCAmelCase ( self : Any , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str=False) -> str:
        """Copy inputs_dict, adding zero labels when return_labels is set."""
        _UpperCamelCase = copy.deepcopy(_A)
        if return_labels:
            if model_class in get_values(_A):
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A)
        return inputs_dict

    def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
        """Run the common config tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
        """Skipped: TimeSformer has no inputs_embeds."""
        pass

    def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
        """Check input embeddings are a Module and output embeddings a Linear/None."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(_A)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            _UpperCamelCase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , nn.Linear))

    def __UpperCAmelCase ( self : str) -> str:
        """Check the forward signature starts with pixel_values."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(_A)
            _UpperCamelCase = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]
            _UpperCamelCase = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _A)

    def __UpperCAmelCase ( self : List[str]) -> List[Any]:
        """Exercise the base-model check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A)

    def __UpperCAmelCase ( self : str) -> Any:
        """Exercise the video-classification check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*_A)

    @slow
    def __UpperCAmelCase ( self : List[Any]) -> int:
        """Load a pretrained checkpoint and check it instantiates."""
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = TimesformerModel.from_pretrained(_A)
            self.assertIsNotNone(_A)

    def __UpperCAmelCase ( self : Dict) -> Union[str, Any]:
        """Check attention outputs: count, shape, and ordering."""
        if not self.has_attentions:
            pass
        else:
            _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
            _UpperCamelCase = True
            for model_class in self.all_model_classes:
                _UpperCamelCase = self.model_tester.seq_length
                _UpperCamelCase = self.model_tester.num_frames
                _UpperCamelCase = True
                _UpperCamelCase = False
                _UpperCamelCase = True
                _UpperCamelCase = model_class(_A)
                model.to(_A)
                model.eval()
                with torch.no_grad():
                    _UpperCamelCase = model(**self._prepare_for_class(_A , _A))
                _UpperCamelCase = outputs.attentions
                self.assertEqual(len(_A) , self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                _UpperCamelCase = True
                _UpperCamelCase = model_class(_A)
                model.to(_A)
                model.eval()
                with torch.no_grad():
                    _UpperCamelCase = model(**self._prepare_for_class(_A , _A))
                _UpperCamelCase = outputs.attentions
                self.assertEqual(len(_A) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                _UpperCamelCase = len(_A)
                # Check attention is always last and order is fine
                _UpperCamelCase = True
                _UpperCamelCase = True
                _UpperCamelCase = model_class(_A)
                model.to(_A)
                model.eval()
                with torch.no_grad():
                    _UpperCamelCase = model(**self._prepare_for_class(_A , _A))
                self.assertEqual(out_len + 1 , len(_A))
                _UpperCamelCase = outputs.attentions
                self.assertEqual(len(_A) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )

    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        """Check hidden-state outputs: count and per-layer shape."""
        # NOTE(review): the nested helper's signature repeats ``lowercase_``
        # (a SyntaxError in the mangled source).
        def check_hidden_states_output(lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Optional[int]):
            _UpperCamelCase = model_class(_A)
            model.to(_A)
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(_A , _A))
            _UpperCamelCase = outputs.hidden_states
            _UpperCamelCase = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(_A) , _A)
            _UpperCamelCase = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )

        _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            check_hidden_states_output(_A , _A , _A)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCamelCase = True
            check_hidden_states_output(_A , _A , _A)
def lowerCAmelCase__() -> List[str]:
    """Download the "eating spaghetti" sample video from the HF Hub and return
    its frames as a list of numpy arrays.

    The original body loaded ``np.load(_lowerCamelCase)`` — an undefined
    mangled name — instead of the path returned by ``hf_hub_download``; the
    local variable is restored here.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    '''Slow integration test: run the pretrained Kinetics-400 Timesformer on a
    real video and compare the logits against reference values.

    NOTE(review): machine-mangled — both methods are named
    ``__UpperCAmelCase`` (the second overwrites the first, losing the
    ``default_image_processor`` property name its body relies on), locals
    collapsed to ``_UpperCamelCase``, and ``_A`` stands in for demangled
    names (``torch_device`` etc.).  Restore the original names before use.
    '''

    @cached_property
    def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
        """Return a VideoMAE image processor (mean/std 0.5) when vision is available."""
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def __UpperCAmelCase ( self : List[str]) -> int:
        """Forward 8 frames through the pretrained model and check the logits."""
        _UpperCamelCase = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            _A)
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_video()
        _UpperCamelCase = image_processor(video[:8] , return_tensors="pt").to(_A)
        # forward pass
        with torch.no_grad():
            _UpperCamelCase = model(**_A)
        # verify the logits
        _UpperCamelCase = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape , _A)
        _UpperCamelCase = torch.tensor([-0.30_16, -0.77_13, -0.42_05]).to(_A)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
| 716 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
    """Repository-quality checks over the ``./datasets`` scripts: every
    ``open(...)`` must pass an explicit encoding (or binary mode) and no
    script may use a bare ``print``.

    NOTE(review): this file is machine-mangled — the base class
    ``lowerCAmelCase`` is a placeholder (``unittest.TestCase`` upstream), and
    all four methods were renamed to ``__UpperCAmelCase`` so only the last
    survived.  The helper names are restored here from the
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements`` call
    sites in the original bodies.
    """

    def _no_encoding_on_file_open(self, file_path: str):
        """Return a regex match if ``file_path`` contains an ``open(...)`` call
        without an explicit encoding or binary/write mode, else None."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        """Return a match for a real ``print(`` call in ``file_path``, ignoring
        prints inside comments, strings, and triple-quoted docstrings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Fail if any dataset script opens a file without utf-8 encoding."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        """Fail if any dataset script contains a bare ``print`` call."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 0 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
    '''Config/input builder for the TF ViT-MAE test suite: stores model
    hyper-parameters and creates configs, pixel-value tensors, and labels.

    NOTE(review): this file is machine-mangled — the ``__init__`` signature
    repeats the parameter name ``lowercase_`` (a SyntaxError), every other
    method is named ``__UpperCAmelCase`` (later definitions overwrite earlier
    ones), attribute bindings collapsed to ``_UpperCamelCase`` (upstream
    ``self.<name> = <name>``), and ``__lowerCamelCase`` stands in for
    demangled names.  Restore the original names before use.
    '''

    def __init__( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str]=13 , lowercase_ : Union[str, Any]=30 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=3 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : List[str]=32 , lowercase_ : Optional[Any]=2 , lowercase_ : str=4 , lowercase_ : List[Any]=37 , lowercase_ : Tuple="gelu" , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[str]=10 , lowercase_ : List[Any]=0.02 , lowercase_ : int=3 , lowercase_ : List[str]=0.6 , lowercase_ : Union[str, Any]=None , ) -> Optional[int]:
        """Store test hyper-parameters and derive the masked sequence length."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = image_size
        _UpperCamelCase = patch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = is_training
        _UpperCamelCase = use_labels
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = mask_ratio
        _UpperCamelCase = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        _UpperCamelCase = (image_size // patch_size) ** 2
        _UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def __UpperCAmelCase ( self : int) -> Tuple:
        """Build (config, pixel_values, labels) for a test run."""
        _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        _UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def __UpperCAmelCase ( self : List[str]) -> int:
        """Build a ViTMAEConfig from the stored hyper-parameters."""
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )

    def __UpperCAmelCase ( self : Tuple , lowercase_ : Any , lowercase_ : str , lowercase_ : Any) -> Tuple:
        """Run the base TF model and check the last hidden state shape."""
        _UpperCamelCase = TFViTMAEModel(config=__lowerCamelCase)
        _UpperCamelCase = model(__lowerCamelCase , training=__lowerCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : str) -> Optional[Any]:
        """Run the pre-training head (RGB and greyscale) and check logits shapes."""
        _UpperCamelCase = TFViTMAEForPreTraining(__lowerCamelCase)
        _UpperCamelCase = model(__lowerCamelCase , training=__lowerCamelCase)
        # expected sequence length = num_patches
        _UpperCamelCase = (self.image_size // self.patch_size) ** 2
        _UpperCamelCase = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        _UpperCamelCase = 1
        _UpperCamelCase = TFViTMAEForPreTraining(__lowerCamelCase)
        _UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        _UpperCamelCase = model(__lowerCamelCase , training=__lowerCamelCase)
        _UpperCamelCase = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))

    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Return (config, inputs_dict) for the common model tests."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        # NOTE(review): this should unpack into (config, pixel_values, labels).
        (_UpperCamelCase) = config_and_inputs
        _UpperCamelCase = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _A, _A, unittest.TestCase ):
'''simple docstring'''
__A = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__A = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
__A = False
__A = False
__A = False
__A = False
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = TFViTMAEModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37)
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds")
def __UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , tf.keras.layers.Layer))
def __UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__lowerCamelCase)
_UpperCamelCase = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def __UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase)
def __UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2)
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2)
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__lowerCamelCase)
_UpperCamelCase = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_UpperCamelCase = model(__lowerCamelCase , noise=__lowerCamelCase)
_UpperCamelCase = copy.deepcopy(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_UpperCamelCase = model(**__lowerCamelCase , noise=__lowerCamelCase)
_UpperCamelCase = outputs_dict[0].numpy()
_UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1e-6)
def __UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
np.random.seed(2)
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = int((config.image_size // config.patch_size) ** 2)
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(lowercase_ : List[str]):
_UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowerCamelCase):
_UpperCamelCase = v.numpy()
else:
_UpperCamelCase = np.array(__lowerCamelCase)
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__lowerCamelCase)
_UpperCamelCase = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_UpperCamelCase = prepare_numpy_arrays(__lowerCamelCase)
_UpperCamelCase = model(__lowerCamelCase , noise=__lowerCamelCase)
_UpperCamelCase = model(**__lowerCamelCase , noise=__lowerCamelCase)
self.assert_outputs_same(__lowerCamelCase , __lowerCamelCase)
def __UpperCAmelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any]) -> Optional[int]:
"""simple docstring"""
np.random.seed(2)
_UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
_UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
_UpperCamelCase = tf.constant(__lowerCamelCase)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCamelCase = tf_noise
super().check_pt_tf_models(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def __UpperCAmelCase(self) -> None:
    """Wrap each keras-serializable `...MainLayer` in a functional Keras model,
    save it to H5, reload with `custom_objects`, and check outputs are unchanged.

    NOTE(review): locals restored from surviving references; the test method's
    original name was lost to mangling.
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    # Collect the `XxxMainLayer` class matching each model class, keeping only
    # keras-serializable Keras layers.
    tf_main_layer_classes = {
        module_member
        for model_class in self.all_model_classes
        for module in (import_module(model_class.__module__),)
        for module_member_name in dir(module)
        if module_member_name.endswith("MainLayer")
        # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
        and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
        for module_member in (getattr(module, module_member_name),)
        if isinstance(module_member, type)
        and tf.keras.layers.Layer in module_member.__bases__
        and getattr(module_member, "_keras_serializable", False)
    }
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
    noise = tf.convert_to_tensor(noise)
    inputs_dict.update({"noise": noise})
    for main_layer_class in tf_main_layer_classes:
        main_layer = main_layer_class(config)
        # Symbolic inputs mirroring the concrete inputs (drop the batch dim).
        symbolic_inputs = {
            name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
        }
        model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
        outputs = model(inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            filepath = os.path.join(tmpdirname, "keras_model.h5")
            model.save(filepath)
            model = tf.keras.models.load_model(
                filepath, custom_objects={main_layer_class.__name__: main_layer_class})
            assert isinstance(model, tf.keras.Model)
            after_outputs = model(inputs_dict)
            self.assert_outputs_same(after_outputs, outputs)
@slow
def __UpperCAmelCase(self) -> None:
    """Round-trip `save_pretrained`/`from_pretrained` with a fixed noise vector
    and check outputs are numerically unchanged.

    NOTE(review): locals restored from surviving references; NaNs are zeroed
    before comparison (standard pattern in these save/load tests). The test
    method's original name was lost to mangling.
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
    for model_class in self.all_model_classes:
        model = model_class(config)
        model_inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(model_inputs, noise=noise)
        if model_class.__name__ == "TFViTMAEModel":
            out_2 = outputs.last_hidden_state.numpy()
            out_2[np.isnan(out_2)] = 0
        else:
            out_2 = outputs.logits.numpy()
            out_2[np.isnan(out_2)] = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=False)
            model = model_class.from_pretrained(tmpdirname)
            after_outputs = model(model_inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_1 = after_outputs["last_hidden_state"].numpy()
                out_1[np.isnan(out_1)] = 0
            else:
                out_1 = after_outputs["logits"].numpy()
                out_1[np.isnan(out_1)] = 0
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
def __UpperCAmelCase(self) -> None:
    """Rebuild the model via `get_config()`/`from_config()` (and via a plain
    config), copy the weights over, and check outputs are unchanged.

    NOTE(review): locals restored from surviving references; the test method's
    original name was lost to mangling.
    """
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
    for model_class in self.all_model_classes:
        model = model_class(config)
        model_inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(model_inputs, noise=noise)
        model_config = model.get_config()
        # make sure that returned config is jsonifiable, which is required by keras
        json.dumps(model_config)
        new_model = model_class.from_config(model.get_config())
        # make sure it also accepts a normal config
        new_model = model_class.from_config(model.config)
        new_model(model_inputs)  # Build model
        new_model.set_weights(model.get_weights())
        new_outputs = new_model(model_inputs, noise=noise)
        self.assert_outputs_same(new_outputs, outputs)
@unittest.skip(
    reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
def __UpperCAmelCase ( self : int) -> int:
    """Intentionally skipped: ViTMAE draws a fresh random mask every forward
    pass, so this common determinism test cannot apply (see `test_save_load`)."""
    pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
def __UpperCAmelCase ( self : Tuple) -> Any:
    """Intentionally skipped: outputs are randomized per forward pass (random
    mask + ids_restore); deterministic coverage lives in `test_save_load`."""
    pass
@slow
def __UpperCAmelCase(self) -> None:
    """Smoke test: the pretrained Hub checkpoint loads into `TFViTMAEModel`.

    NOTE(review): the original asserted on an undefined mangled name; bind the
    loaded model to a local and assert on it.
    """
    model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
    self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test below.

    NOTE(review): the original assigned the opened image to a mangled name and
    returned the undefined `image`; the def name is restored from the
    `prepare_img()` call site in the integration test.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCAmelCase(unittest.TestCase):
    """Integration test for TFViTMAEForPreTraining against fixed expected logits.

    NOTE(review): locals had been mangled; the property name is restored from
    the `self.default_image_processor` access in the test body.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def __UpperCAmelCase(self) -> None:
        """Forward the COCO fixture image with fixed noise and compare logits."""
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# Module-level logger, referenced as `logger` inside the classes below.
# NOTE(review): the original bound this to a mangled name, leaving `logger`
# undefined at every call site.
logger = logging.get_logger(__name__)
class _UpperCAmelCase:
    """Holds one conversation and its history: processed user inputs, generated
    responses, and the pending (not yet processed) user input.

    NOTE(review): attribute assignments, parameter names and method names had
    been mangled (every method shared one placeholder name, so only the last
    survived). Names are restored from the in-file call sites
    (`mark_processed`, `append_response`, `iter_texts`, `add_user_input`).
    """

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None,
                 past_user_inputs=None, generated_responses=None) -> None:
        if not conversation_id:
            # NOTE(review): original called the non-existent `uuid.uuida()`;
            # `uuid.uuid4()` is the random-UUID constructor.
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other) -> bool:
        # Use `self.__class__` rather than the module-level name: another class
        # in this file was mangled to the same module-level identifier.
        if not isinstance(other, self.__class__):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False) -> None:
        """Stage `text` as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        """Move the pending user input (if any) into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str) -> None:
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole conversation, ending with
        the pending user input if present."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase(Pipeline):
    """Multi-turn conversational pipeline.

    NOTE(review): the base class and decorator arguments referenced an undefined
    mangled name; `Pipeline` and `PIPELINE_INIT_ARGS` (imported above) are the
    only sensible targets. Internal names and the hook-method names
    (`preprocess`/`_forward`/`postprocess`/`_sanitize_parameters`) are restored
    from the `Pipeline` contract and the in-file `_legacy_parse_and_tokenize`
    call.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Fall back to EOS as the pad token so batching works for models
            # shipped without a pad token.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None,
                             clean_up_tokenization_spaces=None, **generate_kwargs):
        """Split pipeline kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap the list when a single conversation is given."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into model inputs."""
        # NOTE(review): the Conversation class and this pipeline were both
        # mangled to the same module-level name, so an isinstance() check
        # cannot be expressed; duck-type on the attribute the pipeline uses.
        if not hasattr(conversation, "new_user_input"):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a response, trimming the prompt so at least `minimum_tokens`
        remain available for generation."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with the decoder start token.
            start_position = 1
        else:
            # Decoder-only outputs echo the prompt; skip it.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and fold the answer into the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback tokenization: concatenate each turn, EOS-separated, keeping
        only the most recent `model_max_length` tokens."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class _UpperCAmelCase ( _UpperCAmelCase ):
'''simple docstring'''
def __lt__( self : Optional[int] , lowercase_ : List[str]) -> Dict:
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : List[Any] , lowercase_ : int) -> Optional[Any]:
"""simple docstring"""
return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort `collection` in place with patience sort and return it.

    Elements are dealt onto piles (`Stack`s) located by binary search on the
    pile tops; the piles are then combined with a heap-based k-way merge.

    NOTE(review): the def name and locals were mangled; names restored from the
    in-file references (`patience_sort` at the entry point, `Stack`,
    `collection` in the original return).
    """
    stacks = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            # Leftmost pile whose top is >= element takes the element.
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    # NOTE(review): both locals were originally bound to one mangled name,
    # leaving `user_input`/`unsorted` undefined; names restored from their uses.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
def solution(length: int = 50) -> int:
    """Count the ways to tile a row of `length` unit cells with unit squares
    and tiles of length 2, 3 and 4 (Project Euler style dynamic programming).

    NOTE(review): the def name and locals were mangled; `solution`, `length`
    and `ways_number` are restored from the surviving references in the body
    and the entry point below.

    >>> solution(4)
    8
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # A tile of `tile_length` placed at `tile_start` leaves a free
                # suffix of length row_length - tile_start - tile_length.
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
    # Prints e.g. "solution() = <count>" via the f-string debug (`=`) specifier.
    # NOTE(review): requires a function actually named `solution` above; the
    # definition names in this file appear mangled — confirm.
    print(F"{solution() = }")
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Extract a 6-layer DistilBERT-shaped student state dict from a 12-layer
    # BERT MLM teacher checkpoint.
    # NOTE(review): every assignment target in this script had been mangled to
    # one placeholder; destination key names (`distilbert.*`) are restored per
    # the upstream `extract_distilbert.py` distillation script — confirm.
    parser = argparse.ArgumentParser(
        description=(
            '''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
            ''' Distillation'''
        )
    )
    parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
    parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
    parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
    parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = '''bert'''
    else:
        raise ValueError('''args.model_type should be "bert".''')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied verbatim.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F"distilbert.embeddings.{w}.weight"] = state_dict[F"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[F"distilbert.embeddings.LayerNorm.{w}"] = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]

    # Every other teacher layer becomes one student layer.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[F"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    # MLM head.
    compressed_sd["vocab_projector.weight"] = state_dict['''cls.predictions.decoder.weight''']
    compressed_sd["vocab_projector.bias"] = state_dict['''cls.predictions.bias''']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F"vocab_transform.{w}"] = state_dict[F"cls.predictions.transform.dense.{w}"]
            compressed_sd[F"vocab_layer_norm.{w}"] = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]

    print(F"N layers selected for distillation: {std_idx}")
    print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
torch.save(compressed_sd, args.dump_checkpoint)
| 719 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images (plus their captions
    and URLs) matching `class_prompt` from the LAION-400M knn service into
    `{class_data_dir}/images`.

    NOTE(review): the original signature duplicated one placeholder parameter
    name (a SyntaxError) and all locals were mangled; names restored from the
    entry-point call and the surviving body references.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    # Nothing to do if enough images were downloaded on a previous run.
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return
    # Grow the candidate pool until the query returns enough hits (cap ~10k).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f'{class_data_dir}/caption.txt', "w") as f1, open(f'{class_data_dir}/urls.txt', "w") as f2, open(
            f'{class_data_dir}/images.txt', "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort: skip anything that fails to fetch or decode.
                continue
    return
def parse_args():
    """Build and parse the CLI arguments of the class-image retrieval script.

    NOTE(review): the def name is restored from the `parse_args()` call in the
    entry point below; the mangled `add_help`/`required`/`type` values are
    restored per the upstream retrieval script — confirm.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # NOTE(review): the parse result was bound to a mangled name while the next
    # line reads `args.*`; restored.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Paths to local test fixtures used by the tests below.
# NOTE(review): all three were originally assigned to a single mangled name;
# names follow the upstream auto feature-extraction test module — confirm.
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class _UpperCAmelCase(unittest.TestCase):
    """Tests for `AutoFeatureExtractor` resolution: Hub shortcuts, local
    configs/files, dynamic (remote-code) loading, and custom registration.

    NOTE(review): local names and test-method names had been mangled (all
    methods shared one placeholder name, so only the last would survive);
    restored from the upstream auto feature-extraction test module. Fixture
    paths are inlined via `get_tests_dir` so each test is self-contained.
    """

    def setUp(self):
        # Don't wait for user confirmation when loading remote code in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        # A directory whose preprocessor_config.json carries feature_extractor_type.
        config = AutoFeatureExtractor.from_pretrained(get_tests_dir("fixtures"))
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(get_tests_dir("fixtures")).to_dict()
            config_dict.pop("feature_extractor_type")
            config = WavaVecaFeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(
            get_tests_dir("fixtures/dummy_feature_extractor_config.json"))
        self.assertIsInstance(config, WavaVecaFeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
                EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
                EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
                EnvironmentError,
                "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(WavaVecaConfig, WavaVecaFeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(get_tests_dir("fixtures"))
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            # Leave the global mappings untouched for other tests.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            # Marker checked below to tell this local class from the Hub one.
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets
# Metric metadata strings.
# NOTE(review): all three were originally assigned to one mangled name while the
# decorator and `_info()` below reference `_CITATION`/`_DESCRIPTION`/
# `_KWARGS_DESCRIPTION`; names restored accordingly.
_CITATION = '''\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
'''

_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """Accuracy on the MATH dataset, comparing answers after LaTeX
    canonicalization via `math_equivalence.is_equiv`.

    NOTE(review): method/parameter/local names had been mangled (duplicate
    parameter names were a SyntaxError); `_info`/`_compute` are the hook names
    `datasets.Metric` dispatches to.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"], )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equivalent to references}."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    """Split a dataset mapping into (feature matrix, target vector).

    NOTE(review): the def/param names are restored from the in-file call site
    in `main` and the body's `data[...]` lookups (the original body referenced
    an undefined name).
    """
    return (data["data"], data["target"])
def xgboost(features, target):
    """Fit and return an `XGBClassifier` on the given training data.

    NOTE(review): the original signature duplicated one placeholder parameter
    name (a SyntaxError) and never bound `classifier`; names restored from the
    in-file call site and body reference.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the IRIS dataset and display its
    normalized confusion matrix.

    NOTE(review): the def name and locals were mangled; restored from the
    surviving in-file references (`iris`, `data_handling`, `xgboost`, and the
    `main()` call in the entry point).
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    # NOTE(review): requires a function actually named `main` above; the
    # definition names in this file appear mangled — confirm.
    main()
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Example counts: the full-speed read benchmarks, plus a smaller size for the
# slower formatted reads.
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
# Results are written next to this script under ./results/<script_name>.json.
# NOTE(review): constant names restored from the later references in this file;
# the `RESULTS_*` names follow the upstream benchmark script — confirm against
# the (off-screen) code that saves the results file.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset, length):
    """Time sequential single-example access: `dataset[i]` for i in [0, length).

    NOTE(review): the def name and parameters were mangled (duplicated
    placeholder names were a SyntaxError); restored from the benchmark table
    below (`read`, keyword `length`).
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Time slice access over the dataset in steps of `batch_size`.

    NOTE(review): names restored from the benchmark table below (`read_batch`,
    keywords `length`/`batch_size`).
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, type, length):
    """Time single-example access under a given output format.

    NOTE(review): names restored from the benchmark table below
    (`read_formatted`, keywords `type`/`length`). `type` deliberately shadows
    the builtin to keep the keyword-call interface.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    """Time batched slice access under a given output format.

    NOTE(review): names restored from the benchmark table below
    (`read_formatted_batch`, keywords `type`/`length`/`batch_size`). `type`
    deliberately shadows the builtin to keep the keyword-call interface.
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
    """Run the dataset-iteration speed benchmarks and dump timings to JSON.

    NOTE(review): in this obfuscated copy the benchmarked functions are all
    defined as ``lowerCAmelCase__`` above, so the names ``read``,
    ``read_batch``, ``read_formatted`` and ``read_formatted_batch`` used in
    the tables below are unresolved, as are ``SPEED_TEST_N_EXAMPLES`` /
    ``SMALL_TEST`` (collapsed onto ``lowerCamelCase__``) and the ``a__``
    passed to ``open`` at the end (upstream: ``RESULTS_FILE_PATH``).
    Verify all bindings against the upstream benchmark before running.
    """
    # Timings accumulator; upstream name: `times`.
    _UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (function, kwargs) pairs benchmarked on the freshly generated dataset.
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # Smaller table re-run after shuffling (torch/tf/pandas variants dropped).
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        _UpperCamelCase = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        # Build a synthetic arrow dataset on disk for the benchmark run.
        _UpperCamelCase = generate_example_dataset(
            os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(a__ ) )
            _UpperCamelCase = func(a__ , **a__ )
        print("shuffling dataset" )
        _UpperCamelCase = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(a__ ) )
            _UpperCamelCase = func(
                a__ , **a__ )
    # Persist the collected timings as JSON bytes.
    with open(a__ , "wb" ) as f:
        f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): `benchmark_iterating` is not defined under that name in
    # this copy (the driver above is named `lowerCAmelCase__`); this call
    # raises NameError as-is.
    benchmark_iterating()
| 82 | 0 |
def lowerCAmelCase__(start, finish) -> None:
    """Greedy activity selection: print a maximal set of mutually
    compatible activity indices.

    Assumes activities are sorted by finish time (as in the driver below).

    Args:
        start: start times, aligned index-wise with ``finish``.
        finish: finish times in non-decreasing order.
    """
    # Original signature had duplicate `a__` parameters (SyntaxError) and
    # the body referenced undefined `snake_case__` names; restored.
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities: select any activity that starts at
    # or after the finish time of the previously selected one.
    for j in range(n):
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the renaming collapsed `start` and `finish` onto
    # `lowerCamelCase__` (second assignment overwrites the first) and
    # `print_max_activities` is not defined in this copy — the call below
    # raises NameError. Verify against the upstream driver.
    lowerCamelCase__ = [1, 3, 0, 5, 8, 5]
    lowerCamelCase__ = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 700 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast CPU tests for the KarrasVe diffusion pipeline.

    NOTE(review): mechanical renaming replaced local variables with
    `_UpperCamelCase` and argument references with `lowercase_`, so names
    such as `model`, `pipe`, `image`, `image_slice` read below are
    unresolved in this copy — it is not runnable as-is. Comments describe
    the upstream intent.
    """
    @property
    def __UpperCAmelCase ( self : int) -> str:
        """Build a tiny, seeded UNet2DModel (upstream: `dummy_uncond_unet`)."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Check pipeline output and dict/tuple return parity on 2 steps."""
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        # Seeds are reset before each call so both runs are identical.
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
        # Compare a fixed 3x3 corner slice against a hard-coded reference.
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for KarrasVe with a full pretrained checkpoint.

    NOTE(review): as above, the renaming left `pipe`, `image`,
    `image_slice` and `expected_slice` unresolved — not runnable as-is.
    """
    def __UpperCAmelCase ( self : int) -> Tuple:
        """Generate 20 steps from google/ncsnpp-celebahq-256 and compare a
        corner slice to reference values."""
        _UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase ( enum.Enum ):
__A = 0
__A = 1
__A = 2
@add_end_docstrings(lowercase__ )
class _UpperCAmelCase ( lowercase__ ):
    """Causal language-model text-generation pipeline (upstream:
    ``TextGenerationPipeline``).

    NOTE(review): mechanical renaming collapsed local variables onto
    ``_UpperCamelCase`` and most argument references onto ``lowercase_``,
    so names read below (``prefix``, ``preprocess_params``,
    ``prefix_inputs``, ``inputs``, ``generated_sequence``, ``ReturnType``,
    …) are unresolved in this copy. The docstrings describe the upstream
    behavior; verify bindings against the original before running.
    """
    # Long dummy article prepended for XLNet / Transfo-XL, which need extra
    # "padding text" to generate sensibly (upstream: XL_PREFIX). Runtime
    # string — must not be edited.
    __A = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> List[str]:
        """Initialize the pipeline and pre-compute prefix-related
        generation defaults (some generate kwargs depend on the prefix
        length, so they cannot live in the per-call methods)."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            _UpperCamelCase = None
            if self.model.config.prefix is not None:
                _UpperCamelCase = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                _UpperCamelCase = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._sanitize_parameters(prefix=lowercase_ , **self._forward_params)
                _UpperCamelCase = {**self._preprocess_params, **preprocess_params}
                _UpperCamelCase = {**self._forward_params, **forward_params}
    def __UpperCAmelCase ( self : int , lowercase_ : Dict=None , lowercase_ : List[Any]=None , lowercase_ : List[str]=None , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : int=None , lowercase_ : Optional[Any]=None , lowercase_ : int=None , **lowercase_ : Union[str, Any] , ) -> int:
        """Split keyword arguments into preprocess / forward / postprocess
        parameter dicts (upstream: ``_sanitize_parameters``)."""
        _UpperCamelCase = {}
        if prefix is not None:
            _UpperCamelCase = prefix
        if prefix:
            # Tokenize the prefix once so generation lengths can account
            # for the tokens it adds in front of every prompt.
            _UpperCamelCase = self.tokenizer(
                lowercase_ , padding=lowercase_ , add_special_tokens=lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
                    " [None, 'hole']")
            _UpperCamelCase = handle_long_generation
        preprocess_params.update(lowercase_)
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        # `return_full_text` / `return_text` / `return_tensors` are mutually
        # exclusive ways of selecting the postprocess ReturnType.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            _UpperCamelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            _UpperCamelCase = ReturnType.TENSORS
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only a single stop token is supported; a multi-token stop
            # sequence degrades to its first token with a warning.
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __UpperCAmelCase ( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any]) -> Optional[int]:
        """Apply Transfo-XL-specific tokenization tweaks before delegating
        to the base class (upstream: ``_parse_and_tokenize``)."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*lowercase_ , **lowercase_)
    def __call__( self : List[str] , lowercase_ : List[str] , **lowercase_ : List[str]) -> Dict:
        """Generate text continuations for the given prompt(s)."""
        return super().__call__(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Dict="" , lowercase_ : Dict=None , **lowercase_ : Union[str, Any]) -> Tuple:
        """Tokenize the (prefixed) prompt; with ``handle_long_generation ==
        "hole"``, left-truncate the input so prompt + new tokens fit the
        model window (upstream: ``preprocess``)."""
        _UpperCamelCase = self.tokenizer(
            prefix + prompt_text , padding=lowercase_ , add_special_tokens=lowercase_ , return_tensors=self.framework)
        _UpperCamelCase = prompt_text
        if handle_long_generation == "hole":
            _UpperCamelCase = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                _UpperCamelCase = generate_kwargs["max_new_tokens"]
            else:
                _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                _UpperCamelCase = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                # Keep only the rightmost tokens of the prompt.
                _UpperCamelCase = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    _UpperCamelCase = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int , **lowercase_ : int) -> str:
        """Run ``model.generate`` on the tokenized prompt and reshape the
        output to (batch, candidates, sequence) (upstream: ``_forward``)."""
        _UpperCamelCase = model_inputs["input_ids"]
        _UpperCamelCase = model_inputs.get("attention_mask" , lowercase_)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            _UpperCamelCase = None
            _UpperCamelCase = None
            _UpperCamelCase = 1
        else:
            _UpperCamelCase = input_ids.shape[0]
        _UpperCamelCase = model_inputs.pop("prompt_text")
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        _UpperCamelCase = generate_kwargs.pop("prefix_length" , 0)
        if prefix_length > 0:
            _UpperCamelCase = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                _UpperCamelCase = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            _UpperCamelCase = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        _UpperCamelCase = self.model.generate(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_)
        _UpperCamelCase = generated_sequence.shape[0]
        if self.framework == "pt":
            _UpperCamelCase = generated_sequence.reshape(lowercase_ , out_b // in_b , *generated_sequence.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : int=ReturnType.FULL_TEXT , lowercase_ : Dict=True) -> str:
        """Decode generated token ids into text — or return raw tensors —
        according to ``return_type`` (upstream: ``postprocess``)."""
        _UpperCamelCase = model_outputs["generated_sequence"][0]
        _UpperCamelCase = model_outputs["input_ids"]
        _UpperCamelCase = model_outputs["prompt_text"]
        _UpperCamelCase = generated_sequence.numpy().tolist()
        _UpperCamelCase = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                _UpperCamelCase = self.tokenizer.decode(
                    lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    _UpperCamelCase = 0
                else:
                    _UpperCamelCase = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , ))
                if return_type == ReturnType.FULL_TEXT:
                    _UpperCamelCase = prompt_text + text[prompt_length:]
                else:
                    _UpperCamelCase = text[prompt_length:]
                _UpperCamelCase = {"generated_text": all_text}
            records.append(lowercase_)
        return records
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Test suite for the fill-mask pipeline (upstream:
    ``FillMaskPipelineTests``).

    NOTE(review): mechanical renaming collapsed distinct class attributes
    and locals — both model mappings below are assigned to the same private
    name ``__A`` (the second overwrites the first), and locals such as
    ``unmasker``, ``pipe``, ``outputs``, ``targets`` are unresolved where
    read. Comments/docstrings describe the upstream intent; this copy is
    not runnable as-is.
    """
    __A = MODEL_FOR_MASKED_LM_MAPPING  # upstream: model_mapping
    __A = TF_MODEL_FOR_MASKED_LM_MAPPING  # upstream: tf_model_mapping (overwrites the line above here)
    def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
        """Release as much GPU memory as possible between tests."""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def __UpperCAmelCase ( self : Tuple) -> List[Any]:
        """Smoke-test fill-mask on TensorFlow with a tiny RoBERTa model."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-0_5,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-0_5,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
        """Smoke-test fill-mask on PyTorch with a tiny RoBERTa model,
        including a multi-mask input."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-0_5,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                {"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
            ] , )
        _UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
        self.assertEqual(
            nested_simplify(lowercase_ , decimals=6) , [
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-0_5,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ] , )
    @require_torch_gpu
    def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
        """Ensure an fp16 model's output is cast back to float32 for
        postprocessing."""
        _UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
        # convert model to fp16
        pipe.model.half()
        _UpperCamelCase = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(lowercase_ , lowercase_)
    @slow
    @require_torch
    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Run the full-size model checks on PyTorch."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
        self.run_large_test(lowercase_)
    @slow
    @require_tf
    def __UpperCAmelCase ( self : List[str]) -> List[str]:
        """Run the full-size model checks on TensorFlow."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
        self.run_large_test(lowercase_)
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
        """Shared assertions for the full-size distilroberta model
        (upstream: ``run_large_test``)."""
        _UpperCamelCase = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
            ] , )
        _UpperCamelCase = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.2_51,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.2_14,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ] , )
        _UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
        self.assertEqual(
            nested_simplify(lowercase_) , [
                {"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
            ] , )
    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any]) -> str:
        """Entry point running the generic pipeline test suite on PyTorch."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    @require_tf
    def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
        """Entry point running the generic pipeline test suite on TensorFlow."""
        _UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
        _UpperCamelCase = None
        _UpperCamelCase = None
        self.run_pipeline_test(lowercase_ , [])
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
        """Build a FillMaskPipeline plus example inputs for the generic
        pipeline test harness (upstream: ``get_test_pipeline``)."""
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = [
            f'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
        """Exercise single/batch calls and error cases, then dispatch to the
        targeted sub-tests (upstream: ``run_pipeline_test``)."""
        _UpperCamelCase = fill_masker.tokenizer
        _UpperCamelCase = fill_masker.model
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token}' , )
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
        with self.assertRaises(lowercase_):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(lowercase_):
            fill_masker("This is")
        self.run_test_top_k(lowercase_ , lowercase_)
        self.run_test_targets(lowercase_ , lowercase_)
        self.run_test_top_k_targets(lowercase_ , lowercase_)
        self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
        self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
    def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
        """Verify `targets` restricts predictions, whether given at pipeline
        construction or at call time (upstream: ``run_test_targets``)."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = sorted(vocab.keys())[:2]
        # Pipeline argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Call argument
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs} , lowercase_)
        _UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
        # Score equivalence
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
        _UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
        _UpperCamelCase = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_) == set(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
            _UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
        # Raises with invalid
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(lowercase_):
                _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
        with self.assertRaises(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
        """Verify `top_k` works both as a pipeline and a call argument, with
        equal results (upstream: ``run_test_top_k``)."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
            ] , )
        self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
        """Verify `top_k` combined with `targets` filters consistently
        (upstream: ``run_test_top_k_targets``)."""
        _UpperCamelCase = tokenizer.get_vocab()
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        # top_k=2, ntargets=3
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        _UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(lowercase_).issubset(lowercase_):
            _UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
    def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
        """Duplicate targets must be deduplicated before top_k is applied
        (upstream: ``fill_mask_with_duplicate_targets_and_top_k``)."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = tokenizer.get_vocab()
        # String duplicates + id duplicates
        _UpperCamelCase = sorted(vocab.keys())[:3]
        _UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        _UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(lowercase_) , 3)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
        """An input with several mask tokens yields one prediction list per
        mask (upstream: ``fill_mask_with_multiple_masks``)."""
        _UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
        _UpperCamelCase = fill_masker(
            f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
        self.assertEqual(
            lowercase_ , [
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
                [
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                    {"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
                ],
            ] , )
| 82 | 0 |
'''simple docstring'''
def lowerCAmelCase__(arr, n, r, index, data, i) -> None:
    """Recursively print every size-``r`` combination of ``arr[0:n]``.

    Args:
        arr: source elements.
        n: number of usable elements in ``arr``.
        r: size of each combination.
        index: next free slot in ``data``.
        data: scratch buffer of length ``r`` holding the current partial
            combination (mutated in place).
        i: index of the next candidate element in ``arr``.
    """
    # Original signature repeated `a__` six times (SyntaxError) and the
    # recursive calls referenced the undefined name `combination_util`;
    # the recursion now calls this function's actual name.
    # A full combination has been assembled: print it.
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    lowerCAmelCase__(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    lowerCAmelCase__(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__(arr, n, r) -> None:
    """Print all combinations of size ``r`` drawn from ``arr[0:n]``.

    Allocates the shared scratch buffer and starts the recursion.
    """
    # Original signature repeated `a__` three times (SyntaxError); restored.
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    # NOTE(review): `combination_util` is not bound under that name in this
    # copy — the recursive helper above is also named `lowerCAmelCase__`.
    # Confirm the intended binding before running.
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 702 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands are supported (characters are classified one
    at a time).  Returns the value left on the operand stack.

    Fixes: the function and locals were obfuscated while the driver below
    called ``dijkstras_two_stack_algorithm`` and referenced ``equation``,
    both previously undefined.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push the result.
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_b = operand_stack.peek()
            operand_stack.pop()
            # num_b was pushed first, so it is the left operand (matters for - and /).
            total = operators[opr](num_b, num_a)
            operand_stack.push(total)
        # RULE 3: "(" and any other character are ignored.
    # RULE 5: the final value on the operand stack is the answer.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for *result* and write them (plus optional per-example
    prediction/target logs) to text files named after the dataset.

    result -- a ``datasets.Dataset`` with "target" and "prediction" columns
    args   -- parsed CLI args (uses .log_outputs, .dataset, .config, .split)

    Fix: parameters were obfuscated to ``a__`` while the body referenced the
    real names, and ``snake_case_`` placeholders replaced ``wer_result`` /
    the map callback / ``True``.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)
    with open(f'{dataset_id}_eval_results.txt', "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f'{i}' + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Lower-case *text*, strip the punctuation ignored during training and
    collapse newline/space sequences so predictions and references compare on
    the same footing.

    Fix: the parameter was obfuscated to ``a__`` while the body referenced
    ``text``, and the regex variable was unbound (``snake_case_``).
    """
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", " ", " "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    """Run ASR inference with ``args.model_id`` over the requested dataset
    split and report WER/CER via ``log_results``.

    Fix: locals (``dataset``, ``asr``, ``batch`` fields, ``map_to_pred``) and
    the ``use_auth_token=True`` flag were obfuscated into undefined names.
    """
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # CLI entry point: the original bound the parser and parsed args to
    # `lowerCamelCase__` while using `parser`/`args`, which were undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()
    main(args)
| 703 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for the BertAbs abstractive-summarization model.

    Fixes: the base class was the undefined name ``lowerCAmelCase`` (the
    imported ``PretrainedConfig`` is intended) and every ``__init__``
    parameter shared the name ``lowercase_`` (a SyntaxError) while the body
    assigned the hyper-parameters below.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        """Store encoder/decoder hyper-parameters; extra kwargs go to the base."""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase ( PipelineTool ):
    """Tool that generates an English caption for an image.

    Fixes: the base class was the undefined name ``lowercase__`` (the
    imported ``PipelineTool`` is intended); the class attributes were all
    collapsed onto ``__A`` and the three pipeline hooks onto one method name,
    so only the last binding of each survived.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """Require the vision backend before normal tool setup."""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode generated ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 704 | from datetime import datetime
import requests
from bs4 import BeautifulSoup  # bug fix: the package is `bs4`, not `bsa`

if __name__ == "__main__":
    # Download the og:image of a web page. The original bound every value to
    # `lowerCamelCase__` while using `url`/`soup`/`image_url`/... (undefined).
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class _UpperCAmelCase(unittest.TestCase):
    """Multi-node data-parallel SageMaker training test.

    Fixes: methods were all named ``__UpperCAmelCase`` (only the last binding
    survived, and unittest requires ``setUp``/``test_*`` names), and the
    placeholders ``_a``/``instance_count``/``job_name`` were undefined.
    """

    def setUp(self):
        # Copy the training script next to the SageMaker test path for pytorch runs.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV in the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 705 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration shared by the DPR encoders and reader.

    Fixes: the base class was the undefined name ``lowerCAmelCase`` and every
    ``__init__`` parameter shared the name ``lowercase_`` (a SyntaxError).
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """Store BERT-style hyper-parameters plus the DPR projection size."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection layer on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 0 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
lowerCamelCase__ = get_logger(__name__)
class VerificationMode(enum.Enum):
    """Which verification checks to run on downloaded data."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


# NOTE: the original gave all of the following classes the same name with the
# undefined base ``__a``; the intended hierarchy (datasets.utils.info_utils)
# is restored below.
class ChecksumVerificationException(Exception):
    """Error raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A downloaded file was not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum does not match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Raise if the recorded checksums differ from the expected ones.

    ``expected_checksums is None`` disables verification.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Error raised during split-size verification."""


class UnexpectedSplits(SplitsVerificationException):
    """A split in the recorded infos was not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not recorded."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """Recorded split sizes do not match the expected sizes."""


def verify_splits(expected_splits, recorded_splits):
    """Raise if the recorded splits/sizes differ from the expected ones."""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at *path*.

    The checksum is a streaming sha256 over 1 MiB chunks (the file-level
    import ``from hashlib import shaaaa`` is broken — hashlib has no such
    name — so the real hash is imported locally here).
    """
    if record_checksum:
        from hashlib import sha256

        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """True when *dataset_size* fits under ``config.IN_MEMORY_MAX_SIZE``."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 706 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding. The original bound both structures to
# `lowerCamelCase__` (the second overwrote the first), referenced an
# undefined `_import_structure`, and never installed the lazy module.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well.
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy shim so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding. The original bound both structures to
# `lowerCamelCase__`, referenced an undefined `_import_structure`, and never
# installed the lazy module into sys.modules.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy shim so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of labels per GLUE task; the original bound this to
# `lowerCamelCase__` while the function below used `GLUE_TASKS_NUM_LABELS`.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint into PyTorch weights + config.

    Fixes: config attributes were never set (obfuscated to bare locals), the
    dump paths were unbound, and the driver called an undefined name.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _UpperCAmelCase ( PretrainedConfig ):
    """DistilBERT model configuration.

    Fixes: the class inherited from itself (``_UpperCAmelCase`` is undefined
    at class-creation time), the ``model_type``/``attribute_map`` class
    attributes were both bound to ``__A``, and every ``__init__`` parameter
    shared the name ``lowercase_`` (a SyntaxError).
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        """Store transformer hyper-parameters; extras forwarded to the base."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for DistilBERT.

    Fixes: the base class must be the imported ``OnnxConfig`` (not the class
    defined just above), and the property must be named ``inputs`` — the ONNX
    exporter looks it up by that name.
    """

    @property
    def inputs(self):
        """Dynamic-axis mapping for the exported model's input tensors."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 708 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Thin Lightning wrapper mirroring the fine-tuning checkpoint layout.

    Fix: the converter below instantiated ``LightningModel``, which the
    obfuscated class name left undefined.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement forward fully to return token embeddings
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    """Transfer a Lightning QA checkpoint into a LongformerForQuestionAnswering
    model and save it with ``save_pretrained``."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for GPTSAN-japanese.

    Fixes: the base class was the undefined name ``a__`` (the imported
    ``PretrainedConfig`` is intended), the three class attributes were all
    bound to ``__A``, and every ``__init__`` parameter shared the name
    ``lowercase_`` (a SyntaxError) while ``super().__init__`` received the
    undefined ``lowerCAmelCase__``.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.0_02,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        """Store model/router hyper-parameters; token ids go to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the sum of switch and extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
| 709 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( LayoutLMvaImageProcessor ):
    """Deprecated alias kept for backward compatibility; warns on construction.

    Fixes: the base class was the undefined name ``lowerCAmelCase`` (the
    imported ``LayoutLMvaImageProcessor`` is intended), ``*args``/``**kwargs``
    shared one parameter name (a SyntaxError), and the warning category
    argument was undefined (``FutureWarning`` upstream).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 82 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator for ``forward``-like model methods that fires the attached
    accelerate hook's ``pre_forward`` (e.g. to load offloaded weights) before
    calling the wrapped method.

    Returns the method unchanged when accelerate is unavailable or older than
    0.17.0.

    Fix: the parameter was obfuscated to ``a__`` while the body referenced
    ``method``, and the parsed-version local (``UpperCAmelCase__``) was
    undefined.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
| 710 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Sequence-to-sequence text generation pipeline.

    Reconstructed from a mangled original in which every method shared the
    name ``__UpperCAmelCase`` (so all but the last were shadowed), several
    signatures repeated a parameter name (SyntaxErrors), and bodies referenced
    names their signatures never bound.  Method names follow what the bodies
    and the sibling subclasses actually call (``_sanitize_parameters``,
    ``_parse_and_tokenize``, ``preprocess``, ``_forward``, ``postprocess``).
    """

    # Key prefix for the records built by ``postprocess`` — the body reads
    # ``self.return_name``; the mangled ``__A`` alias is kept for safety.
    __A = '''generated'''
    return_name = '''generated'''

    def __init__(self, *args, **kwargs):
        """Initialise the base Pipeline and restrict usable model classes."""
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split call-time kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Hook for subclasses to validate lengths; the base accepts anything."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prepend the model's prefix and tokenize a string or list of strings."""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        """Run generation; unwrap single-record results for batched string input."""
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        """Tokenize the raw input for the model."""
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Call ``model.generate`` and reshape the output to one row per input."""
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        """Decode generated ids into records keyed by ``self.return_name``."""
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline subclass.

    Fixes the mangled original, whose two methods each repeated one parameter
    name (``*lowercase_, **lowercase_`` and three ``lowercase_`` positionals
    — both SyntaxErrors) while the bodies referenced ``min_length`` /
    ``max_length`` / ``input_length`` that were never bound.
    """

    # Record-key prefix used by the base class's ``postprocess``.
    __A = '''summary'''
    return_name = '''summary'''

    def __call__(self, *args, **kwargs):
        """Delegate to the base text2text ``__call__`` unchanged."""
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn (without failing) about length settings that look wrong for summarization."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline subclass.

    Fixes the mangled original, whose methods repeated parameter names
    (SyntaxErrors) and referenced unbound locals (``src_lang``, ``tgt_lang``,
    ``task``, ``items``, ``preprocess_params`` ...).  Method names follow what
    the bodies call on ``super()``.
    """

    # Record-key prefix used by the base class's ``postprocess``.
    __A = '''translation'''
    return_name = '''translation'''

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> int:
        """Warn when the input is close to ``max_length``; always accept."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """Use the tokenizer's translation-aware builder when available."""
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """Route ``src_lang``/``tgt_lang`` to preprocess; fall back to the task name."""
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        """Delegate to the base text2text ``__call__`` unchanged."""
        return super().__call__(*args, **kwargs)
| 82 | 0 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
    """Small from-scratch CNN: one convolution layer, one pooling layer, two BP layers.

    NOTE(review): identifiers are machine-mangled — the ``__init__`` signature
    repeats ``lowercase_`` (a SyntaxError), bodies reference names such as
    ``bp_numa`` / ``conva_get`` that the signatures never bind, and every local
    is rebound to ``_UpperCamelCase``.  Comments describe apparent intent only;
    code bytes are deliberately left untouched.
    """

    def __init__( self : Any , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : Optional[Any]=0.2 , lowercase_ : Tuple=0.2) -> List[Any]:
        """Store layer sizes and learning rates, then randomly initialise weights."""
        # Presumably the three ``bp_numa`` reads were the three BP layer sizes
        # before mangling — TODO confirm against the upstream source.
        _UpperCamelCase = bp_numa
        _UpperCamelCase = bp_numa
        _UpperCamelCase = bp_numa
        _UpperCamelCase = conva_get[:2]
        _UpperCamelCase = conva_get[2]
        _UpperCamelCase = size_pa
        _UpperCamelCase = rate_w
        _UpperCamelCase = rate_t
        # Random conv kernels / weight matrices drawn from [-0.5, 0.5);
        # thresholds drawn from (-1, 1].
        _UpperCamelCase = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
            for i in range(self.conva[1])
        ]
        _UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        _UpperCamelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
        _UpperCamelCase = -2 * np.random.rand(self.conva[1]) + 1
        _UpperCamelCase = -2 * np.random.rand(self.num_bpa) + 1
        _UpperCamelCase = -2 * np.random.rand(self.num_bpa) + 1
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int) -> Tuple:
        """Serialise hyper-parameters and learned weights to a pickle file."""
        _UpperCamelCase = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(UpperCAmelCase_ , "wb") as f:
            pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
        print(f'Model saved: {save_path}')
    @classmethod
    def __UpperCAmelCase ( cls : List[Any] , lowercase_ : Any) -> Optional[int]:
        """Rebuild a CNN instance from a pickle created by the save method."""
        with open(UpperCAmelCase_ , "rb") as f:
            _UpperCamelCase = pickle.load(UpperCAmelCase_) # noqa: S301
        _UpperCamelCase = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        _UpperCamelCase = model_dic.get("size_pooling1")
        _UpperCamelCase = model_dic.get("num_bp1")
        _UpperCamelCase = model_dic.get("num_bp2")
        _UpperCamelCase = model_dic.get("num_bp3")
        _UpperCamelCase = model_dic.get("rate_weight")
        _UpperCamelCase = model_dic.get("rate_thre")
        # create model instance
        _UpperCamelCase = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
        # modify model parameter
        _UpperCamelCase = model_dic.get("w_conv1")
        _UpperCamelCase = model_dic.get("wkj")
        _UpperCamelCase = model_dic.get("vji")
        _UpperCamelCase = model_dic.get("thre_conv1")
        _UpperCamelCase = model_dic.get("thre_bp2")
        _UpperCamelCase = model_dic.get("thre_bp3")
        return conv_ins
    def __UpperCAmelCase ( self : str , lowercase_ : Optional[Any]) -> Any:
        """Logistic sigmoid activation (body reads an unbound ``x`` — mangled)."""
        return 1 / (1 + np.exp(-1 * x))
    def __UpperCAmelCase ( self : Tuple , lowercase_ : int) -> List[str]:
        """Round a value to three decimal places."""
        return round(UpperCAmelCase_ , 3)
    def __UpperCAmelCase ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : List[str]) -> Union[str, Any]:
        """Convolve the image with each kernel; return (flattened slices, feature maps)."""
        _UpperCamelCase = convs[0]
        _UpperCamelCase = convs[1]
        _UpperCamelCase = np.shape(UpperCAmelCase_)[0]
        # get the data slice of original image data, data_focus
        _UpperCamelCase = []
        for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
            for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
                _UpperCamelCase = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(UpperCAmelCase_)
        # calculate the feature map of every single kernel, and saved as list of matrix
        _UpperCamelCase = []
        _UpperCamelCase = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(UpperCAmelCase_):
            _UpperCamelCase = []
            for i_focus in range(len(UpperCAmelCase_)):
                # Dot product of one slice with one kernel, minus its threshold.
                _UpperCamelCase = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(UpperCAmelCase_))
            _UpperCamelCase = np.asmatrix(UpperCAmelCase_).reshape(
                UpperCAmelCase_ , UpperCAmelCase_)
            data_featuremap.append(UpperCAmelCase_)
        # expanding the data slice to One dimenssion
        _UpperCamelCase = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(UpperCAmelCase_))
        _UpperCamelCase = np.asarray(UpperCAmelCase_)
        return focus_list, data_featuremap
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict="average_pool") -> Tuple:
        """Down-sample each feature map with average or max pooling."""
        _UpperCamelCase = len(featuremaps[0])
        _UpperCamelCase = int(size_map / size_pooling)
        _UpperCamelCase = []
        for i_map in range(len(UpperCAmelCase_)):
            _UpperCamelCase = featuremaps[i_map]
            _UpperCamelCase = []
            for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
                for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
                    _UpperCamelCase = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(UpperCAmelCase_))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(UpperCAmelCase_))
            _UpperCamelCase = np.asmatrix(UpperCAmelCase_).reshape(UpperCAmelCase_ , UpperCAmelCase_)
            featuremap_pooled.append(UpperCAmelCase_)
        return featuremap_pooled
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str]) -> Optional[Any]:
        """Flatten a list of matrices into a single 1-D array."""
        _UpperCamelCase = []
        for i in range(len(UpperCAmelCase_)):
            _UpperCamelCase = np.shape(data[i])
            _UpperCamelCase = data[i].reshape(1 , shapes[0] * shapes[1])
            _UpperCamelCase = data_listed.getA().tolist()[0]
            data_expanded.extend(UpperCAmelCase_)
        _UpperCamelCase = np.asarray(UpperCAmelCase_)
        return data_expanded
    def __UpperCAmelCase ( self : Tuple , lowercase_ : str) -> str:
        """Flatten a single matrix into a 1-row matrix."""
        _UpperCamelCase = np.asarray(UpperCAmelCase_)
        _UpperCamelCase = np.shape(UpperCAmelCase_)
        _UpperCamelCase = data_mat.reshape(1 , shapes[0] * shapes[1])
        return data_expanded
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[Any]) -> List[str]:
        """Up-sample pooled gradients back to feature-map size for the conv layer."""
        _UpperCamelCase = []
        _UpperCamelCase = 0
        for i_map in range(UpperCAmelCase_):
            _UpperCamelCase = np.ones((size_map, size_map))
            for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
                for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
                    _UpperCamelCase = pd_pool[
                        i_pool
                    ]
                    _UpperCamelCase = i_pool + 1
            # Multiply by the sigmoid derivative of the forward feature map.
            _UpperCamelCase = np.multiply(
                UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
            pd_all.append(UpperCAmelCase_)
        return pd_all
    def __UpperCAmelCase ( self : List[str] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict=bool) -> Optional[int]:
        """Train with plain backprop until ``n_repeat`` epochs or MSE target is hit."""
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(UpperCAmelCase_)))
        print((" - - Shape: Teach_Data ", np.shape(UpperCAmelCase_)))
        _UpperCamelCase = 0
        _UpperCamelCase = []
        _UpperCamelCase = 10000
        while rp < n_repeat and mse >= error_accuracy:
            _UpperCamelCase = 0
            print(f'-------------Learning Time {rp}--------------')
            for p in range(len(UpperCAmelCase_)):
                # print('------------Learning Image: %d--------------'%p)
                _UpperCamelCase = np.asmatrix(datas_train[p])
                _UpperCamelCase = np.asarray(datas_teach[p])
                # Forward pass: conv -> pool -> flatten -> two BP layers.
                _UpperCamelCase , _UpperCamelCase = self.convolute(
                    UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                _UpperCamelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga)
                _UpperCamelCase = np.shape(UpperCAmelCase_)
                _UpperCamelCase = self._expand(UpperCAmelCase_)
                _UpperCamelCase = data_bp_input
                _UpperCamelCase = np.dot(UpperCAmelCase_ , self.vji.T) - self.thre_bpa
                _UpperCamelCase = self.sig(UpperCAmelCase_)
                _UpperCamelCase = np.dot(UpperCAmelCase_ , self.wkj.T) - self.thre_bpa
                _UpperCamelCase = self.sig(UpperCAmelCase_)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                _UpperCamelCase = np.multiply(
                    (data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
                _UpperCamelCase = np.multiply(
                    np.dot(UpperCAmelCase_ , self.wkj) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
                _UpperCamelCase = np.dot(UpperCAmelCase_ , self.vji)
                _UpperCamelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
                _UpperCamelCase = pd_conva_pooled.T.getA().tolist()
                _UpperCamelCase = self._calculate_gradient_from_pool(
                    UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1]):
                    _UpperCamelCase = self._expand_mat(pd_conva_all[k_conv])
                    _UpperCamelCase = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_)
                    _UpperCamelCase = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]))
                    _UpperCamelCase = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                _UpperCamelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                _UpperCamelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                _UpperCamelCase = self.thre_bpa - pd_k_all * self.rate_thre
                _UpperCamelCase = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                _UpperCamelCase = np.sum(abs(data_teach - bp_outa))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            _UpperCamelCase = rp + 1
            _UpperCamelCase = error_count / patterns
            all_mse.append(UpperCAmelCase_)
        def draw_error():
            # Plot the per-epoch MSE against the target accuracy line.
            _UpperCamelCase = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(UpperCAmelCase_ , "+-")
            plt.plot(UpperCAmelCase_ , "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(UpperCAmelCase_ , alpha=0.5)
            plt.show()
        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f' - - Mse: {mse:.6f}'))
        if draw_e:
            draw_error()
        return mse
    def __UpperCAmelCase ( self : List[str] , lowercase_ : int) -> List[Any]:
        """Run the forward pass over test data and return rounded predictions."""
        _UpperCamelCase = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(UpperCAmelCase_)))
        for p in range(len(UpperCAmelCase_)):
            _UpperCamelCase = np.asmatrix(datas_test[p])
            _UpperCamelCase , _UpperCamelCase = self.convolute(
                UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            _UpperCamelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga)
            _UpperCamelCase = self._expand(UpperCAmelCase_)
            _UpperCamelCase = data_bp_input
            _UpperCamelCase = bp_outa * self.vji.T - self.thre_bpa
            _UpperCamelCase = self.sig(UpperCAmelCase_)
            _UpperCamelCase = bp_outa * self.wkj.T - self.thre_bpa
            _UpperCamelCase = self.sig(UpperCAmelCase_)
            produce_out.extend(bp_outa.getA().tolist())
        _UpperCamelCase = [list(map(self.do_round , UpperCAmelCase_)) for each in produce_out]
        return np.asarray(UpperCAmelCase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Tuple) -> Any:
        """Return the conv and pooled feature maps for one input image."""
        _UpperCamelCase = np.asmatrix(UpperCAmelCase_)
        _UpperCamelCase , _UpperCamelCase = self.convolute(
            UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        _UpperCamelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga)
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Intentionally a no-op: the demo/training driver was stripped out.
    pass
| 711 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# NOTE(review): every constant below is bound to the same mangled name
# ``lowerCamelCase__`` — each assignment shadows the previous one, so only the
# last value survives at runtime.  The class below references the original
# upstream names (VOCAB_FILES_NAMES, SPIECE_UNDERLINE, ...), which are never
# defined here.  Bytes left untouched.
lowerCamelCase__ = logging.get_logger(__name__)
# Name of the on-disk SentencePiece model file.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
# Download URLs for the pretrained T5 vocabularies.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
# SentencePiece's word-initial marker character.
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    """SentencePiece-based T5 tokenizer (mangled copy).

    NOTE(review): identifiers are machine-mangled — several signatures repeat
    ``lowercase_`` (SyntaxErrors), all four class attributes share the name
    ``__A``, and bodies read names their signatures never bind.  Comments
    describe apparent intent; code bytes are deliberately left untouched.
    """

    # Apparently: vocab file names / pretrained maps / max sizes / model input
    # names — all four collapsed onto ``__A``, so only the last binding survives.
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register the ``<extra_id_*>`` sentinels."""
        if extra_ids > 0 and additional_special_tokens is None:
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Back-compat shim: resolve the effective model_max_length, warning on legacy defaults."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Vocabulary size: SentencePiece pieces plus the extra-id sentinels."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id mapping, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Mask marking special tokens (the trailing EOS of each sequence) with 1."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """List all registered ``<extra_id_N>`` sentinel token strings."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """List the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append EOS unless the sequence already ends with it (then warn)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Token-type ids: T5 does not use them, so return all zeros."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model input: ``A </s>`` or ``A </s> B </s>``."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor before pickling."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore state and reload the SentencePiece model from ``vocab_file``."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Non-legacy mode: prefix text with the SentencePiece underline marker."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
            return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Encode with SentencePiece, stripping the artificial leading underline."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Map a token string to an id; sentinels occupy the top of the vocab."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Map an id back to its token string (sentinels above the SP range)."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Join tokens into text, decoding special tokens outside SentencePiece."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialise) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | 0 |
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , lowercase_ : int) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = num_of_nodes
_UpperCamelCase = []
_UpperCamelCase = {}
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Union[str, Any]:
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight])
def __UpperCAmelCase ( self : Tuple , lowercase_ : int) -> List[Any]:
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node])
def __UpperCAmelCase ( self : Tuple , lowercase_ : int) -> List[Any]:
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
_UpperCamelCase = self.find_component(UpperCamelCase__)
def __UpperCAmelCase ( self : str , lowercase_ : list[int] , lowercase_ : int , lowercase_ : int) -> Optional[Any]:
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
_UpperCamelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase__)
elif component_size[u_node] >= component_size[v_node]:
_UpperCamelCase = self.find_component(UpperCamelCase__)
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase__)
def __UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes):
self.m_component.update({node: node})
component_size.append(1)
_UpperCamelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_UpperCamelCase = edge
_UpperCamelCase = self.m_component[u]
_UpperCamelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_UpperCamelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase__ , UpperCamelCase__):
_UpperCamelCase = edge
_UpperCamelCase = self.m_component[u]
_UpperCamelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
num_of_components -= 1
_UpperCamelCase = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}')
def lowerCAmelCase__ ( ) -> None:
    """No-op placeholder kept so the module exposes the same helper name.

    Fixes the previous ``Optional[int]`` return annotation: ``Optional`` is
    never imported in this module (only ``Any`` is), so evaluating the old
    annotation raised ``NameError`` at import time.  The function always
    returned ``None`` anyway.
    """
    pass
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 712 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( k ) -> tuple:
    """Build a deferred ``getitem`` operation: ``(operator.getitem, key)``.

    Fixes the mangled original, whose body returned the unbound name ``k``
    while the parameter was called ``a__``, and whose ``->str`` annotation was
    simply wrong for a tuple-returning helper.
    """
    return getitem, k
def lowerCAmelCase__ ( k , v ) -> tuple:
    """Build a deferred ``setitem`` operation: ``(operator.setitem, key, value)``.

    Fixes the mangled original, whose two parameters shared the name ``a__``
    (a SyntaxError) and whose ``->Tuple`` annotation used a name this module
    never imports.
    """
    return setitem, k, v
def lowerCAmelCase__ ( k ) -> tuple:
    """Build a deferred ``delitem`` operation: ``(operator.delitem, key)``.

    Fixes the mangled original, whose body returned the unbound name ``k``
    while the parameter was called ``a__``, and corrects the wrong ``->int``
    annotation.
    """
    return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
lowerCamelCase__ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCamelCase__ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    """Run a scripted operation sequence against ``HashMap`` and a plain
    ``dict`` and assert the two stay observationally identical after
    every step (result, repr, key set, length, item set).
    """
    _UpperCamelCase = HashMap(initial_block_size=4 )  # implementation under test
    _UpperCamelCase = {}  # builtin dict as the reference implementation
    for _, (fun, *args) in enumerate(a__ ):
        # NOTE(review): both calls pass the same garbled `a__` arguments;
        # upstream presumably ran the operation once against the HashMap
        # (`my`) and once against the dict (`py`) -- confirm before use.
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
    """Assert that ``HashMap``'s public attributes are a strict subset of
    ``dict``'s public attributes (the custom map exposes nothing extra).
    """

    def is_public(name ) -> bool:
        # Public attributes are those whose name has no leading underscore.
        # Fix: the original took parameter `a__` but tested the undefined
        # name `name`, and the comprehensions below called it with the
        # undefined `a__`.
        return not name.startswith("_" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 82 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase__ = '''src/diffusers'''
lowerCamelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCamelCase__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCamelCase__ = spec.loader.load_module()
def lowerCAmelCase__ ( a__ , a__ ) ->List[Any]:
'''simple docstring'''
return line.startswith(__UpperCamelCase ) or len(__UpperCamelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , __UpperCamelCase ) is not None
def lowerCAmelCase__ ( a__ ) ->Optional[Any]:
    """Locate the source of ``a__`` inside the local diffusers checkout.

    Args:
        a__: dotted object name, e.g. ``models.unet_2d.UNet2DModel``.

    Returns:
        The object's source code (up to where its indentation ends) as a
        single string.

    Raises:
        ValueError: when no module file or no matching class/def is found.
    """
    # Fix: the original assigned every local to the garbled name
    # `_UpperCamelCase` while reading `parts`, `i`, `module`, `lines`,
    # `indent`, `line_index` and `start_index`; the intended locals are
    # restored below (names taken from the later reads).
    parts = a__.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {a__}.' )
    with open(os.path.join(DIFFUSERS_PATH , f'{module}.py' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        # Each nesting level adds one indentation step for the next search.
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f' {a__} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    return "".join(lines[start_index:line_index] )
lowerCamelCase__ = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCamelCase__ = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCamelCase__ = re.compile(R'''<FILL\s+[^>]*>''')
def lowerCAmelCase__ ( a__ ) ->Dict:
'''simple docstring'''
_UpperCamelCase = code.split("\n" )
_UpperCamelCase = 0
while idx < len(__UpperCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__UpperCamelCase ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def lowerCAmelCase__ ( a__ ) ->Dict:
    """Format the code string ``a__`` with black and restyle its docstrings.

    Indented code is temporarily wrapped in a dummy ``class Bla:`` so
    black accepts it; the wrapper prefix is stripped from the result.
    """
    # Fix: the original assigned to the garbled `_UpperCamelCase` while
    # reading the undefined `has_indent`/`result`, referenced the
    # non-existent `black.TargetVersion.PYaa` member (upstream pins
    # `PY37`), and passed the code string as `preview=` (upstream passes
    # `preview=True`).
    has_indent = len(get_indent(a__ ) ) > 0
    if has_indent:
        a__ = f'class Bla:\n{a__}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(a__ , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def lowerCAmelCase__ ( filename , overwrite=False ) ->Tuple:
    """Check every ``# Copied from diffusers...`` block in ``filename``.

    Args:
        filename: path of the Python file to check.
        overwrite: when True, rewrite out-of-date copies in place.

    Returns:
        A list of ``[object_name, start_line]`` entries, one per mismatch.
    """
    # Fix: the original signature `( a__ , a__=False )` reused one
    # parameter name (SyntaxError); locals were garbled to
    # `_UpperCamelCase` while read under their upstream names, and the
    # rewrite message had its `{filename}` placeholder mangled to the
    # literal `(unknown)`.
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        # The copy starts on the next line, or one further when the
        # comment sits one level above the copied definition.
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f'^{indent}# End copy' , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code = "".join(lines[start_index:line_index] )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = "\n".join(
            line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(line ) is None )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("with" , "" ).split("," )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    # `all-casing` also replaces the lower/upper variants.
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.' )
        with open(filename , "w" , encoding="utf-8" , newline="\n" ) as f:
            f.writelines(lines )
    return diffs
def lowerCAmelCase__ ( a__ = False ) ->Any:
    """Run the copy-consistency check over every ``.py`` file in diffusers.

    Args:
        a__: when True, fix inconsistent copies in place instead of
            raising.

    Raises:
        Exception: listing every inconsistency when ``a__`` is False.
    """
    # Fix: locals were garbled to `_UpperCamelCase` while read as
    # `all_files`, `new_diffs`, `diffs` and `diff`; the diff message had
    # its `{filename}` placeholder mangled to the literal `(unknown)`.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , a__ )
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not a__ and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
# CLI entry point: `python utils/check_copies.py [--fix_and_overwrite]`.
# NOTE(review): the parser/args locals are garbled to `lowerCamelCase__`
# while the reads use `parser`/`args` -- confirm against upstream.
if __name__ == "__main__":
    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    lowerCamelCase__ = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 713 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = KandinskyVaaControlnetImgaImgPipeline
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
__A = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__A = False
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return 32
@property
def __UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 100
@property
def __UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCamelCase = UNetaDConditionModel(**lowercase_)
return model
@property
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
return model
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = self.dummy_unet
_UpperCamelCase = self.dummy_movq
_UpperCamelCase = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCamelCase = DDIMScheduler(**lowercase_)
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
"""simple docstring"""
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
lowercase_)
# create init_image
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
# create hint
_UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
if str(lowercase_).startswith("mps"):
_UpperCamelCase = torch.manual_seed(lowercase_)
else:
_UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
_UpperCamelCase = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = "cpu"
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**lowercase_)
_UpperCamelCase = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
_UpperCamelCase = output.images
_UpperCamelCase = pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
_UpperCamelCase = init_image.resize((512, 512))
_UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png")
_UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
_UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
_UpperCamelCase = "A robot, 4k photo"
_UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
pipe_prior.to(lowercase_)
_UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
_UpperCamelCase = pipeline.to(lowercase_)
pipeline.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
_UpperCamelCase , _UpperCamelCase = pipe_prior(
lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
_UpperCamelCase = pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : Tuple=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Any=99 , lowercase_ : List[str]=32 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=4 , lowercase_ : Any=37 , lowercase_ : List[str]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[str]=512 , lowercase_ : List[str]=16 , lowercase_ : Dict=2 , lowercase_ : int=0.02 , lowercase_ : List[Any]=False , lowercase_ : str=True , lowercase_ : Tuple="None" , lowercase_ : Union[str, Any]=3 , lowercase_ : List[str]=4 , lowercase_ : int=None , ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowercase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaModel(config=lowercase_)
_UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(lowercase_)
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaForMaskedLM(config=lowercase_)
_UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=lowercase_)
_UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=lowercase_)
_UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __UpperCAmelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=lowercase_)
_UpperCamelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_UpperCamelCase = model(lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = self.prepare_config_and_inputs()
(
_UpperCamelCase
) = config_and_inputs
_UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
__A = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
__A = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=lowercase_ , hidden_size=37)
def __UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def __UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
def __UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
@slow
def __UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
self.assertIsNotNone(lowercase_)
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet")
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def __UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
_UpperCamelCase = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
_UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_UpperCamelCase = model(lowercase_ , attention_mask=lowercase_)[0]
_UpperCamelCase = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowercase_ , atol=1e-4)
def lowerCAmelCase__ ( a__ ) ->int:
    """Number of distinct ways to climb ``a__`` stairs taking 1 or 2 steps.

    This follows the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2).

    Raises:
        AssertionError: if ``a__`` is not a positive integer.
    """
    # Fix: the original validated `isinstance(a__, a__)` (the value used
    # as its own type) and read the undefined name `number_of_steps`
    # throughout, including inside the assertion message's f-string.
    assert (
        isinstance(a__ , int ) and a__ > 0
    ), f'number_of_steps needs to be positive integer, your input {a__}'
    if a__ == 1:
        return 1
    # (previous, current) walk the Fibonacci pair (ways(n-1), ways(n)).
    previous , current = 1, 1
    for _ in range(a__ - 1 ):
        current , previous = current + previous, current
    return current
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _UpperCAmelCase ( lowercase__ ):
    """Output of one KarrasVe scheduler step.

    NOTE(review): the three fields below were garbled to the same name
    `__A`; upstream they are `prev_sample`, `derivative` and
    `pred_original_sample` (torch tensors) -- confirm against diffusers.
    """
    # prev_sample: computed sample x_{t-1} for the next denoising step (presumably a tensor)
    __A = 42
    # derivative: d(sample)/d(sigma) used by the solver (presumably a tensor)
    __A = 42
    # pred_original_sample: optional predicted fully-denoised sample
    __A = None
class _UpperCAmelCase ( lowercase__, lowercase__ ):
    """Stochastic sampler in the style of Karras et al. (2022),
    "Elucidating the Design Space of Diffusion-Based Generative Models".

    NOTE(review): constructor parameters and most assignments below are
    garbled (`lowercase_`, `_UpperCamelCase`) while later reads use the
    upstream attribute names (`self.num_inference_steps`,
    `self.timesteps`, `self.schedule`, `self.config.sigma_min`, ...);
    per-line notes record the apparent intent -- confirm against the
    diffusers `KarrasVeScheduler` before relying on this class.
    """
    # Solver order: each step may be followed by a 2nd-order correction.
    __A = 2
    @register_to_config
    def __init__( self : List[str] , lowercase_ : float = 0.02 , lowercase_ : float = 100 , lowercase_ : float = 1.0_07 , lowercase_ : float = 80 , lowercase_ : float = 0.05 , lowercase_ : float = 50 , ) -> Tuple:
        """Store the noise-schedule limits; sampling state starts unset."""
        # NOTE(review): upstream parameter names are (sigma_min, sigma_max,
        # s_noise, s_churn, s_min, s_max); all are garbled to `lowercase_`.
        _UpperCamelCase = sigma_max
        # setable values
        _UpperCamelCase = None  # presumably self.num_inference_steps
        _UpperCamelCase = None  # presumably self.timesteps
        _UpperCamelCase = None  # sigma(t_i)
    def __UpperCAmelCase ( self : int , lowercase_ : torch.FloatTensor , lowercase_ : Optional[int] = None) -> int:
        """scale_model_input: identity -- no input scaling is required.

        NOTE(review): returns the undefined name `sample`; the parameter
        was garbled to `lowercase_`.
        """
        return sample
    def __UpperCAmelCase ( self : Any , lowercase_ : int , lowercase_ : Union[str, torch.device] = None) -> List[Any]:
        """set_timesteps: build descending integer timesteps and the
        matching geometric sigma schedule on the requested device."""
        _UpperCamelCase = num_inference_steps  # presumably self.num_inference_steps
        _UpperCamelCase = np.arange(0 , self.num_inference_steps)[::-1].copy()  # descending t_i
        _UpperCamelCase = torch.from_numpy(__lowercase).to(__lowercase)  # presumably self.timesteps
        # Geometric interpolation from sigma_max down to sigma_min.
        _UpperCamelCase = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        _UpperCamelCase = torch.tensor(__lowercase , dtype=torch.floataa , device=__lowercase)  # presumably self.schedule
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : torch.FloatTensor , lowercase_ : float , lowercase_ : Optional[torch.Generator] = None) -> str:
        """add_noise_to_input: explode sigma to sigma_hat and add matching
        noise (eq. 5 of Karras et al.); returns (sample_hat, sigma_hat)."""
        # gamma > 0 only while sigma lies inside the churn window [s_min, s_max].
        if self.config.s_min <= sigma <= self.config.s_max:
            _UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
        else:
            _UpperCamelCase = 0
        # sample eps ~ N(0, S_noise^2 * I)
        _UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=__lowercase).to(sample.device)
        _UpperCamelCase = sigma + gamma * sigma
        _UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def __UpperCAmelCase ( self : Dict , lowercase_ : torch.FloatTensor , lowercase_ : float , lowercase_ : float , lowercase_ : torch.FloatTensor , lowercase_ : bool = True , ) -> List[str]:
        """step: first-order (Euler) update from sigma_hat to sigma_prev."""
        _UpperCamelCase = sample_hat + sigma_hat * model_output  # predicted denoised sample
        _UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat  # d(sample)/d(sigma)
        _UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative  # Euler step
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__lowercase , derivative=__lowercase , pred_original_sample=__lowercase)
    def __UpperCAmelCase ( self : Tuple , lowercase_ : torch.FloatTensor , lowercase_ : float , lowercase_ : float , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : bool = True , ) -> Optional[Any]:
        """step_correct: second-order (Heun) correction using the
        derivative re-evaluated at the Euler prediction."""
        _UpperCamelCase = sample_prev + sigma_prev * model_output  # denoised estimate at sigma_prev
        _UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev  # corrected derivative
        # Average the two slopes (trapezoidal / Heun update).
        _UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__lowercase , derivative=__lowercase , pred_original_sample=__lowercase)
    def __UpperCAmelCase ( self : Dict , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Union[str, Any]) -> str:
        """add_noise: not supported by this scheduler."""
        raise NotImplementedError()
| 715 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _UpperCAmelCase(ChunkPipeline):
    """Zero-shot object detection pipeline: scores free-text candidate labels
    against an image and returns bounding boxes for matches.

    Restores the obfuscated original: the decorator argument and base class
    were the undefined name ``lowerCAmelCase`` (both are imported at the top
    of this module), every local was bound to ``_UpperCamelCase`` and then
    read under its real name, and the sort lambda referenced an undefined
    ``x`` while naming its parameter ``lowercase_``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, "vision")
        # MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING is imported above and
        # was otherwise unused — it is the intended argument here.
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Detect objects described by ``candidate_labels`` in ``image``.

        ``image`` may be a URL/path, a PIL image, or a list of
        ``{"image": ..., "candidate_labels": ...}`` dicts.
        """
        if "text_queries" in kwargs:
            # Legacy keyword name for candidate_labels.
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """Route ``threshold`` / ``top_k`` kwargs to the postprocess step."""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model input per candidate label (chunk pipeline)."""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # Original size so boxes can be rescaled in postprocessing.
        # NOTE(review): the obfuscated dtype read ``torch.intaa``; int64 is the
        # most plausible original — confirm against the upstream pipeline.
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """Run the model on one (image, label) chunk, forwarding bookkeeping keys."""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert per-label model outputs into sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        # Highest-confidence detections first.
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an ``[xmin, ymin, xmax, ymax]`` tensor into an int dict."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """Tests for `TextStreamer` / `TextIteratorStreamer`.

    Restores the obfuscated original: every argument was the undefined name
    ``A_``, locals were bound to ``_UpperCamelCase`` then read under their
    real names, and the methods were not named ``test_*`` so unittest never
    discovered them.
    """

    def test_text_streamer_matches_non_streaming(self):
        # Streamed stdout must equal a plain greedy generation's decoded text.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # disable EOS so exactly max_new_tokens are generated
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        # Text collected by iterating the streamer must equal greedy output.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        # With skip_prompt=True only the newly generated tokens are printed.
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Decode kwargs (skip_special_tokens) must be forwarded to the streamer.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.0_01)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase(TestCase):
    """Lints every dataset script: `open()` must pass an encoding, and no
    `print(...)` calls may appear (use the datasets logger instead).

    Restores the obfuscated original: the base class was the undefined name
    ``lowerCAmelCase`` (``TestCase`` is imported above), all four methods
    were named ``__UpperCAmelCase`` even though the test methods call
    ``self._no_encoding_on_file_open`` / ``self._no_print_statements``, and
    locals were bound to ``_UpperCamelCase`` then read under real names.
    """

    def _no_encoding_on_file_open(self, filepath: str):
        """Return a regex match for an `open(...)` call missing an encoding, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Return a regex match for a real `print(` call (ignoring comments/strings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: submodule name -> public names it exports.
# (The obfuscated original bound this dict to a throwaway name, so the
# `_import_structure` read by `_LazyModule` below was undefined; the
# TYPE_CHECKING branch also imported mangled `pixastruct` module/class names
# that do not match the structure declared here.)
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that resolves them on first attribute access.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class Conversation:
    """Container for a chat history consumed by the conversational pipeline.

    Restores the obfuscated original: ``__init__`` declared four parameters
    all named ``lowercase_`` (a SyntaxError), never assigned any attribute to
    ``self`` (everything went to ``_UpperCamelCase``), and called
    ``uuid.uuida()``.  The class is renamed to ``Conversation`` because the
    pipeline below annotates/validates against that exact name; a
    backward-compatible alias to the old obfuscated name is kept.
    """

    def __init__(self, text: str = None, conversation_id: "uuid.UUID" = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        # Unique id so two histories with identical text still compare unequal.
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        # The not-yet-processed user utterance (None once mark_processed() ran).
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Stage a new user utterance; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model-generated reply."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs in chronological order."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output


# Backward-compatible alias for the historical (obfuscated) class name.
_UpperCAmelCase = Conversation
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase(Pipeline):
    """Multi-turn conversational pipeline operating on `Conversation` objects.

    Restores the obfuscated original: the decorator argument and base class
    were the undefined name ``lowerCAmelCase`` (``PIPELINE_INIT_ARGS`` and
    ``Pipeline`` are imported above), several signatures declared duplicate
    ``lowercase_`` parameters (a SyntaxError), and every local/attribute was
    bound to ``_UpperCamelCase`` and then read under its real name.  Method
    names follow the Pipeline contract (`_sanitize_parameters`, `preprocess`,
    `_forward`, `postprocess`) plus `_legacy_parse_and_tokenize`, which
    `preprocess` calls by that exact name.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when none is set.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        """Split call-time kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run generation on one `Conversation` or a list of them."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the whole conversation into framework-specific input ids."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, trimming the prompt so at least `minimum_tokens` remain."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1  # skip the decoder start token only
        else:
            start_position = n  # decoder-only: strip the echoed prompt
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the reply and fold it back into the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback flattening: each turn encoded and separated by EOS."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        # Keep only the most recent tokens that fit the model.
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (sentencepiece) tokenizer is optional; when unavailable the fast
# tokenizer simply has no slow counterpart to convert from.
# (The obfuscated original bound every constant below — and `logger` and
# `BigBirdTokenizer` — to the same throwaway name, leaving the names the
# tokenizer class actually reads undefined.)
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

# Sentencepiece word-boundary marker.
SPIECE_UNDERLINE = "▁"
class _UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by the `tokenizers` library.

    Restores the obfuscated original: the base class was the undefined name
    ``UpperCAmelCase__`` (``PreTrainedTokenizerFast`` is imported above),
    every class attribute was assigned to ``__A`` (so the framework-read
    names were missing), ``__init__`` declared duplicate ``lowercase_``
    parameters (a SyntaxError), and method locals were bound to
    ``_UpperCamelCase`` then read under their real names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens = []

    def __init__(self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs):
        # Wrap plain strings into AddedToken so stripping behavior is explicit.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        # We can only export a slow tokenizer if the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a_pair is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` unit squares using unit
    squares plus tiles of length 2, 3, and 4 (Project Euler 117 style DP).

    The obfuscated original named the function ``lowerCAmelCase__`` while the
    ``__main__`` guard below calls ``solution()``; the real name is restored
    (an alias keeps the old name importable).

    >>> solution(5)
    15
    """
    # ways_number[r] = number of tilings of a row of length r (all-1s counts once).
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


# Backward-compatible alias for the historical (obfuscated) name.
lowerCAmelCase__ = solution

if __name__ == "__main__":
    print(F"{solution() = }")
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Return the area of the a x b grid whose contained-rectangle count
    (T(a) * T(b), T = triangle numbers) is closest to `target`
    (Project Euler 85).

    The obfuscated original took a parameter named ``a__`` but read
    ``target`` in the body, and every intermediate was bound to
    ``_UpperCamelCase`` then read under its real name; all names are
    restored (an alias keeps the old function name importable).
    """
    # Precompute triangle numbers well past sqrt(2*target) so every
    # candidate index below stays in range.
    triangle_numbers = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # Rectangle count closest to target seen so far, and its grid area.
    best_product = 0
    area = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # Quadratic-formula estimate of b so that T(a)*T(b) ~= target,
        # then test the two nearest integers.
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area


# Backward-compatible alias for the historical (obfuscated) name.
lowerCAmelCase__ = solution

if __name__ == "__main__":
    print(F"{solution() = }")
| 719 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ~`num_class_images` real images matching `class_prompt` via the
    LAION knn service into `class_data_dir`, writing captions/urls/path lists.

    Restores the obfuscated original: all three parameters were named ``a__``
    (a SyntaxError), the body read the real names (`class_data_dir`,
    `num_class_images`, ...), the three file handles were all named ``fa``,
    and the ``__main__`` guard calls ``retrieve(...)`` — the real name.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    # Nothing to do if we already have enough images on disk.
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates come back (capped at 10k).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f'{class_data_dir}/caption.txt', "w") as caption_file, open(f'{class_data_dir}/urls.txt', "w") as url_file, open(
            f'{class_data_dir}/images.txt', "w") as image_list_file:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Verify the payload decodes as an image before keeping it.
                    Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb") as f:
                        f.write(img.content)
                    caption_file.write(images["caption"] + "\n")
                    url_file.write(images["url"] + "\n")
                    image_list_file.write(f'{class_data_dir}/images/{total}.jpg' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any candidate that fails.
                continue
    return


# Backward-compatible alias for the historical (obfuscated) name.
lowerCAmelCase__ = retrieve
def parse_args():
    """Parse the CLI arguments for the image-retrieval script.

    Restores the obfuscated original, which passed the undefined name ``a__``
    for ``add_help`` / ``required`` / ``type``; the ``__main__`` guard calls
    this function as ``parse_args``, so the real name is restored (with a
    backward-compatible alias).
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


# Backward-compatible alias for the historical (obfuscated) name.
lowerCAmelCase__ = parse_args
if __name__ == "__main__":
    # Parse CLI arguments, then download the class regularization images.
    # (The obfuscated original assigned the parsed args to a throwaway name
    # and then read the undefined name `args`.)
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _UpperCAmelCase(unittest.TestCase):
    """Unit tests for `GenerationConfig` save/load, update, and kwarg handling.

    Restores the obfuscated original: every argument was the undefined name
    ``__a``, locals were bound to ``_UpperCamelCase`` then read under their
    real names, and the methods were not named ``test_*`` so unittest never
    discovered them.
    """

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
    """Staging-endpoint tests: push `GenerationConfig` to the Hub (user and org
    namespaces) and verify the round-trip via `from_pretrained`."""

    @classmethod
    def setUpClass(cls):
        """Authenticate once with the staging test token."""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort cleanup of the repos created by the tests."""
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Round-trip a config through `push_to_hub` and through
        `save_pretrained(push_to_hub=True)` in the user namespace."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f'{USER}/test-generation-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Same round-trip, but in an organization namespace."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets

# BibTeX entry for the MATH dataset paper.
_CITATION = '''\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
'''

# Short description of what the metric measures.
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

# Keyword-argument documentation appended to the metric docstring.
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """Accuracy on the MATH dataset, comparing predictions to references after
    LaTeX canonicalization via `math_equivalence.is_equiv`."""

    def _info(self):
        """Declare metric metadata and the expected string/string input schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return the fraction of predictions equivalent to their reference.

        Args:
            predictions: list of LaTeX/natural-language answer strings.
            references: list of reference strings, parallel to `predictions`.
        """
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            # is_equiv canonicalizes both sides (e.g. "1/2" vs "\frac{1}{2}").
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for wrapping a timm model as a transformers backbone.

    Stores the timm model name plus flags controlling channels, feature
    extraction, and pretrained-weight loading.
    """

    # Model-type identifier consumed by the auto-config machinery.
    __A = '''timm_backbone'''

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        """
        Args:
            backbone: Name of the timm model to instantiate (``None`` = unset).
            num_channels: Number of input image channels.
            features_only: Whether timm returns intermediate feature maps only.
            use_pretrained_backbone: Whether to load pretrained timm weights.
            out_indices: Feature stages to expose; defaults to the last stage.
        """
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # Marker telling the backbone utilities that this config wraps timm.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
import json
import os
import tempfile

import datasets
from utils import generate_example_dataset, get_duration

# Number of examples for the full-speed runs and for the small formatted runs.
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
# Timings are dumped next to this script under results/<script>.json.
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset, length):
    """Sequentially read `length` single examples from `dataset` (timed)."""
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size` (timed).

    NOTE(review): `length` is kept for a uniform signature with `read` but the
    loop iterates over `len(dataset)`, mirroring the original behavior.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, length, type):
    """Read `length` single examples with the dataset cast to `type` format (timed)."""
    # `type` intentionally shadows the builtin: call sites pass it as a keyword.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Read `length` examples in `batch_size` slices with the dataset cast to `type` (timed)."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Benchmark single/batched reads, plain and formatted, on a generated
    dataset — before and after shuffling — and dump the timings to JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # After shuffling only the numpy-formatted subset is re-run.
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow" ), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__, str(kwargs) )
            # Key each timing by function name plus the kwarg values used.
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs )
        print("shuffling dataset" )
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs) )
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs )
    with open(RESULTS_FILE_PATH, "wb" ) as f:
        f.write(json.dumps(times ).encode("utf-8" ) )
| 82 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCamelCase__,lowerCamelCase__,lowerCamelCase__ = False, False, False
@dataclass
class _UpperCAmelCase :
    """Audio feature for `datasets`.

    Examples are stored as a ``{"bytes", "path"}`` struct and decoded on access
    into ``{"path", "array", "sampling_rate"}``.
    """

    # Target sampling rate; if set, decoded audio is resampled to it.
    sampling_rate: Optional[int] = None
    # Down-mix multi-channel audio to mono when decoding.
    mono: bool = True
    # If False, accessing an example returns the raw {"bytes", "path"} dict.
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = '''dict'''
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Audio''', init=False, repr=False )

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode a path, raw bytes, or an {"array", "sampling_rate"} dict into
        the storable {"bytes", "path"} form."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install \'soundfile\'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a \'sampling_rate\' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        """Decode a stored example into {"path", "array", "sampling_rate"}.

        Args:
            value: the stored {"bytes", "path"} dict.
            token_per_repo_id: optional auth tokens keyed by Hub repo id, used
                when the audio must be streamed from a private dataset repo.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.')
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install \'librosa\' and \'soundfile\'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        if file is None:
            # No in-memory bytes: stream from `path`, resolving a Hub token if
            # the path points inside a Hub dataset repo.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        # soundfile returns (frames, channels); transpose to channel-first for librosa.
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Expose the raw storage schema; flattening a decoded Audio is unsupported."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast string/binary/struct Arrow storage into the {"bytes", "path"} layout."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Already-decoded rows: re-encode each example to bytes.
            # NOTE(review): upstream this class is named `Audio`; confirm the alias exists in this module.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Inline file contents into the storage, keeping only basenames as paths."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 700 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast CPU tests for the Karras VE unconditional pipeline."""

    @property
    def dummy_uncond_unet(self):
        """A tiny, deterministically-seeded UNet2DModel."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        """Dict and tuple return modes must produce the same image."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test against the pretrained 256x256 CelebA-HQ NCSN++ checkpoint."""

    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
def lowerCAmelCase__ ( discount_rate: float , cash_flows: list ) ->float:
    """Return the net present value of `cash_flows` discounted at `discount_rate`.

    The i-th cash flow is divided by (1 + discount_rate) ** i; the sum is
    rounded to 2 decimal places.

    Args:
        discount_rate: Per-period discount rate; must be non-negative.
        cash_flows: Cash flow per period, starting at period 0; must be non-empty.

    Raises:
        ValueError: If `discount_rate` is negative or `cash_flows` is empty.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative" )
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """Unit test for ``tf_top_k_top_p_filtering``.

    NOTE(review): identifiers in this file were machine-mangled — every local
    is bound to ``_UpperCamelCase`` and later statements read the undefined
    name ``__UpperCamelCase`` (and ``output``); ``tf.floataa``/``tf.intaa``
    are presumably ``tf.float32``/``tf.int32`` — confirm against the upstream
    transformers test before running.
    """

    def __UpperCAmelCase ( self : int) -> int:
        """Filter a 2x30 logit batch with top_k=10, top_p=0.6,
        min_tokens_to_keep=4 and compare surviving values/indices against the
        hand-annotated expectations in the fixture below."""
        # Fixture: two rows of 30 logits; inline comments mark the ranked
        # highest values per row (underscored literals keep the same value).
        _UpperCamelCase = tf.convert_to_tensor(
            [
                [
                    8.2_22_09_91, # 3rd highest value; idx. 0
                    -0.5_62_00_44,
                    5.23_22_97_52,
                    4.0_38_63_93,
                    -6.8_79_83_78,
                    -0.54_78_58_02,
                    -3.2_01_21_53,
                    2.92_77_71_76,
                    1.88_17_19_53,
                    7.35_34_12_76, # 5th highest value; idx. 9
                    8.43_20_78_33, # 2nd highest value; idx. 10
                    -9.85_71_18_36,
                    -5.96_20_92_36,
                    -1.13_03_91_61,
                    -7.1_11_52_94,
                    -0.8_36_96_33,
                    -5.3_18_64_08,
                    7.06_42_74_07,
                    0.81_36_93_44,
                    -0.82_02_38_17,
                    -5.9_17_97_96,
                    0.58_81_34_43,
                    -6.99_77_84_38,
                    4.71_55_11_89,
                    -0.18_77_16_37,
                    7.44_02_07_59, # 4th highest value; idx. 25
                    9.38_45_09_87, # 1st highest value; idx. 26
                    2.12_66_29_41,
                    -9.32_56_20_38,
                    2.35_65_25_22,
                ], # cummulative prob of 5 highest values <= 0.6
                [
                    0.58_42_55_18,
                    4.53_13_92_38,
                    -5.57_51_04_64,
                    -6.28_03_06_99,
                    -7.19_52_95_03,
                    -4.02_12_25_51,
                    1.39_33_70_37,
                    -6.06_70_70_57,
                    1.59_48_05_17,
                    -9.64_31_19,
                    0.03_90_77_99,
                    0.67_23_17_62,
                    -8.88_20_67_26,
                    6.27_11_59_22, # 4th highest value; idx. 13
                    2.28_52_07_23,
                    4.82_76_75_06,
                    4.30_42_13_68,
                    8.8_27_53_13, # 2nd highest value; idx. 17
                    5.44_02_99_58, # 5th highest value; idx. 18
                    -4.4_73_57_94,
                    7.38_57_95_36, # 3rd highest value; idx. 20
                    -2.91_05_16_63,
                    2.61_94_60_77,
                    -2.5_67_47_62,
                    -9.48_95_93_02,
                    -4.02_92_26_45,
                    -1.35_41_69_18,
                    9.67_70_23_23, # 1st highest value; idx. 27
                    -5.89_47_85_53,
                    1.85_37_04_67,
                ], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.floataa , )
        # (batch, index) pairs expected to survive the filtering.
        _UpperCamelCase = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
        _UpperCamelCase = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
        # NOTE(review): from here on the code reads ``__UpperCamelCase`` /
        # ``output``, which are never assigned — the mangling dropped the real
        # local names; as written this raises NameError at runtime.
        _UpperCamelCase = tf_top_k_top_p_filtering(__UpperCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4)
        _UpperCamelCase = output[output != -float("inf")]
        _UpperCamelCase = tf.cast(
            tf.where(tf.not_equal(__UpperCamelCase , tf.constant(-float("inf") , dtype=tf.floataa))) , dtype=tf.intaa , )
        tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1e-1_2)
        tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase)
@require_tf
class _UpperCAmelCase ( unittest.TestCase, __snake_case ):
    """TF generation integration tests (saved-model export, graph-embedded
    tokenizer, EOS handling, model-kwargs filtering).

    NOTE(review): the mixin base ``__snake_case`` is undefined — it was
    presumably ``GenerationIntegrationTestsMixin`` (imported above); the
    method bodies bind every local to ``_UpperCamelCase`` and then read the
    undefined name ``__UpperCamelCase``, so they raise NameError as written.
    """

    if is_tf_available():
        # Framework-dependent hooks consumed by the agnostic mixin.
        __A = {
            '''AutoModelForCausalLM''': TFAutoModelForCausalLM,
            '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
            '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
            '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
            '''LogitsProcessorList''': TFLogitsProcessorList,
            '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
            '''create_tensor_fn''': tf.convert_to_tensor,
            '''floats_tensor''': floats_tensor,
            '''return_tensors''': '''tf''',
        }

    @slow
    def __UpperCAmelCase ( self : Any) -> List[str]:
        """Export generate() in a saved model with a fixed input length and
        check the served output matches eager generate() per batch size."""
        _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        _UpperCamelCase = 2
        _UpperCamelCase = 2

        class _UpperCAmelCase ( tf.Module ):
            """Wrapper module exposing generate() as a serving signature."""

            def __init__( self : List[Any] , lowercase_ : Tuple) -> List[Any]:
                """Store the wrapped model."""
                super(__UpperCamelCase , self).__init__()
                _UpperCamelCase = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids"),
                    tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask"),
                ) , jit_compile=__UpperCamelCase , )
            def __UpperCAmelCase ( self : Any , lowercase_ : int , lowercase_ : Optional[int]) -> Tuple:
                """Serving entry point: run generate() on the traced inputs."""
                _UpperCamelCase = self.model.generate(
                    input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
                return {"sequences": outputs["sequences"]}

        _UpperCamelCase = [[2, 0], [102, 103]]
        _UpperCamelCase = [[1, 0], [1, 1]]
        _UpperCamelCase = DummyModel(model=__UpperCamelCase)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={"serving_default": dummy_model.serving})
            _UpperCamelCase = tf.saved_model.load(__UpperCamelCase).signatures["serving_default"]
            for batch_size in range(1 , len(__UpperCamelCase) + 1):
                _UpperCamelCase = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                _UpperCamelCase = serving_func(**__UpperCamelCase)["sequences"]
                _UpperCamelCase = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase)
                tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase)

    @slow
    def __UpperCAmelCase ( self : Tuple) -> str:
        """Same as above but with a fixed batch size and variable input
        length, served one row at a time."""
        _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        _UpperCamelCase = 1
        _UpperCamelCase = 2

        class _UpperCAmelCase ( tf.Module ):
            """Wrapper module exposing generate() as a serving signature."""

            def __init__( self : Optional[Any] , lowercase_ : List[str]) -> Optional[Any]:
                """Store the wrapped model."""
                super(__UpperCamelCase , self).__init__()
                _UpperCamelCase = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids"),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask"),
                ) , jit_compile=__UpperCamelCase , )
            def __UpperCAmelCase ( self : Dict , lowercase_ : int , lowercase_ : Optional[Any]) -> List[str]:
                """Serving entry point: run generate() on the traced inputs."""
                _UpperCamelCase = self.model.generate(
                    input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
                return {"sequences": outputs["sequences"]}

        _UpperCamelCase = [[2], [102, 103]]
        _UpperCamelCase = [[1], [1, 1]]
        _UpperCamelCase = DummyModel(model=__UpperCamelCase)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={"serving_default": dummy_model.serving})
            _UpperCamelCase = tf.saved_model.load(__UpperCamelCase).signatures["serving_default"]
            for input_row in range(len(__UpperCamelCase)):
                _UpperCamelCase = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                _UpperCamelCase = serving_func(**__UpperCamelCase)["sequences"]
                _UpperCamelCase = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase)
                tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase)

    @slow
    @require_tensorflow_text
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        """Embed a SentencePiece tokenizer and generate() inside one Keras
        model and check the composite model can be saved."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=__UpperCamelCase)

            class _UpperCAmelCase ( tf.keras.layers.Layer ):
                """Layer bundling tokenize -> generate -> detokenize."""

                def __init__( self : str) -> Dict:
                    """Load tokenizer and tiny T5 model."""
                    super().__init__()
                    _UpperCamelCase = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(__UpperCamelCase , "spiece.model") , "rb").read())
                    _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def __UpperCAmelCase ( self : List[str] , lowercase_ : List[str] , *lowercase_ : Optional[int] , **lowercase_ : List[str]) -> Optional[int]:
                    """Tokenize, pad, generate and detokenize a string batch."""
                    _UpperCamelCase = self.tokenizer.tokenize(__UpperCamelCase)
                    _UpperCamelCase , _UpperCamelCase = text.pad_model_inputs(
                        __UpperCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id)
                    _UpperCamelCase = self.model.generate(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase)
                    return self.tokenizer.detokenize(__UpperCamelCase)

            _UpperCamelCase = CompleteSentenceTransformer()
            _UpperCamelCase = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs")
            _UpperCamelCase = complete_model(__UpperCamelCase)
            _UpperCamelCase = tf.keras.Model(__UpperCamelCase , __UpperCamelCase)
            keras_model.save(__UpperCamelCase)

    def __UpperCAmelCase ( self : Tuple) -> List[str]:
        """Generation stops at eos_token_id — both for a single id and for a
        list of ids — at a deterministic (seeded) length."""
        _UpperCamelCase = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        _UpperCamelCase = 14
        _UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        _UpperCamelCase = "Hello, my dog is cute and"
        _UpperCamelCase = tokenizer(__UpperCamelCase , return_tensors="tf")
        _UpperCamelCase = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        _UpperCamelCase = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            _UpperCamelCase = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase)
        self.assertTrue(expectation == len(generated_tokens[0]))
        _UpperCamelCase = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            _UpperCamelCase = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def __UpperCAmelCase ( self : Any) -> Optional[Any]:
        """Unexpected generate() kwargs are forwarded when call() accepts
        them, and rejected when the encoder's call() takes **kwargs (no
        filtering possible)."""
        _UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        _UpperCamelCase = "Hugging Face is a technology company based in New York and Paris."
        _UpperCamelCase = bart_tokenizer(__UpperCamelCase , return_tensors="tf").input_ids
        _UpperCamelCase = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        _UpperCamelCase = bart_model.generate(__UpperCamelCase).numpy()

        class _UpperCAmelCase ( __snake_case ):
            """Bart subclass whose call() tolerates an extra kwarg."""

            def __UpperCAmelCase ( self : Any , lowercase_ : int , lowercase_ : List[str]=None , **lowercase_ : List[Any]) -> List[str]:
                """Delegate to the parent call()."""
                return super().call(__UpperCamelCase , **__UpperCamelCase)

        _UpperCamelCase = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        _UpperCamelCase = bart_model.generate(__UpperCamelCase , foo="bar").numpy()
        self.assertTrue(np.array_equal(__UpperCamelCase , __UpperCamelCase))

        class _UpperCAmelCase ( bart_model.model.encoder.__class__ ):
            """Encoder subclass whose call() accepts arbitrary kwargs."""

            def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[Any] , **lowercase_ : str) -> Any:
                """Delegate to the parent call()."""
                return super().call(__UpperCamelCase , **__UpperCamelCase)

        _UpperCamelCase = FakeEncoder(bart_model.config , bart_model.model.shared)
        _UpperCamelCase = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        _UpperCamelCase = bart_model.generate(__UpperCamelCase).numpy()
        with self.assertRaises(__UpperCamelCase):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(__UpperCamelCase , foo="bar")
| 702 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__(a__):
    """Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    Operands must be single digits and every binary operation must be wrapped
    in parentheses, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``; characters that are
    neither digits, operators nor ``)`` (spaces, ``(``) are skipped.

    NOTE(review): the original bound every local to ``_UpperCamelCase`` and
    then read the undefined names ``equation`` and ``opr`` (and pushed the
    whole expression instead of the current character); locals restored from
    the algorithm's structure.

    :param a__: expression string to evaluate
    :return: the value of the expression (int, or float after a division)
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for ch in a__:
        if ch.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(ch))
        elif ch in operators:
            # RULE 2: operators wait on their own stack.
            operator_stack.push(ch)
        elif ch == ")":
            # RULE 4: a closing paren applies the top operator to the two
            # topmost operands (right operand is on top).
            opr = operator_stack.peek()
            operator_stack.pop()
            right = operand_stack.peek()
            operand_stack.pop()
            left = operand_stack.peek()
            operand_stack.pop()
            operand_stack.push(operators[opr](left, right))
    # RULE 5: the single remaining operand is the result.
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo expression: single-digit operands, fully parenthesized.
    # NOTE(review): the original printed the undefined names ``equation`` and
    # ``dijkstras_two_stack_algorithm``; the evaluator defined above is bound
    # to ``lowerCAmelCase__``.
    lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(F"{lowerCamelCase__} = {lowerCAmelCase__(lowerCamelCase__)}")
def lowerCAmelCase__(height, from_pole, to_pole, with_pole):
    """Solve Tower of Hanoi recursively, printing each move.

    NOTE(review): the original declared four parameters all named ``a__`` (a
    SyntaxError) and called the undefined names ``move_tower``/``move_disk``;
    parameter names are restored and the recursion is made self-contained via
    inner helpers so it no longer depends on later (shadowed) definitions.

    :param height: number of disks to move
    :param from_pole: pole the stack starts on
    :param to_pole: destination pole
    :param with_pole: spare pole for intermediate moves
    """
    def _print_move(src, dst):
        # One physical move of the top disk.
        print("moving disk from", src, "to", dst)

    def _solve(n, src, dst, spare):
        if n >= 1:
            _solve(n - 1, src, spare, dst)
            _print_move(src, dst)
            _solve(n - 1, spare, dst, src)

    _solve(height, from_pole, to_pole, with_pole)
def lowerCAmelCase__(from_pole, to_pole):
    """Print a single Tower of Hanoi move.

    NOTE(review): the original declared two parameters both named ``a__`` (a
    SyntaxError) and printed the undefined name ``lowerCAmelCase_``; parameter
    names restored from the hanoi context above.
    """
    print("moving disk from", from_pole, "to", to_pole)
def lowerCAmelCase__ ( ) ->Any:
    """Interactive entry point: read a tower height and solve the puzzle.

    NOTE(review): ``move_tower`` and ``lowerCAmelCase_`` are undefined here —
    the original identifiers were mangled (the solver above now also shares
    this function's name), so this raises NameError as written; also ``Any``
    is not imported in this chunk.
    """
    # Height read from stdin; intended to be passed to the solver.
    _UpperCamelCase = int(input("Height of hanoi: " ).strip() )
    move_tower(lowerCAmelCase_ , "A" , "B" , "C" )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module — the entry function
    # above was renamed by the mangling — so this raises NameError as written.
    main()
| 703 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for the BertAbs extractive/abstractive summarizer.

    NOTE(review): the original subclassed the undefined name ``lowerCAmelCase``
    (the file imports ``PretrainedConfig``, used here instead), declared twelve
    ``__init__`` parameters all called ``lowercase_`` (a SyntaxError), and
    assigned each value to a throwaway local instead of an attribute.
    Parameter/attribute names are recovered from the right-hand sides of those
    assignments.
    """

    __A = '''bertabs'''

    def __init__(
        self,
        vocab_size=30522,       # size of the BERT vocabulary
        max_pos=512,            # maximum number of positions
        enc_layers=6,           # encoder depth
        enc_hidden_size=512,    # encoder hidden size
        enc_heads=8,            # encoder attention heads
        enc_ff_size=512,        # encoder feed-forward size
        enc_dropout=0.2,        # encoder dropout
        dec_layers=6,           # decoder depth
        dec_hidden_size=768,    # decoder hidden size
        dec_heads=8,            # decoder attention heads
        dec_ff_size=2048,       # decoder feed-forward size
        dec_dropout=0.2,        # decoder dropout
        **kwargs,
    ):
        """Store the encoder/decoder hyper-parameters; remaining kwargs are
        forwarded to ``PretrainedConfig.__init__``."""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
from math import sqrt
def lowerCAmelCase__(a__):
    """Return the sum of the proper divisors of ``a__`` (the aliquot sum).

    NOTE(review): the original body read the undefined names
    ``SCREAMING_SNAKE_CASE__`` and ``n`` instead of its own parameter, and its
    ``->Any`` annotation referenced an unimported name; both fixed.

    :param a__: a positive integer
    :return: sum of all divisors of ``a__`` strictly smaller than it
    """
    total = 0
    # Walk divisor pairs (i, a__ // i) up to the square root.
    for i in range(1, int(sqrt(a__) + 1)):
        if a__ % i == 0 and i != sqrt(a__):
            total += i + a__ // i
        elif i == sqrt(a__):
            # Perfect square: count the root only once.
            total += i
    return total - a__
def lowerCAmelCase__(a__ = 10_000):
    """Sum all amicable numbers strictly below ``a__`` (Project Euler 21).

    A number i is amicable when d(d(i)) == i with d(i) != i, where d is the
    sum of proper divisors; the d(i) != i clause excludes perfect numbers.

    NOTE(review): the original called the undefined name ``sum_of_divisors``;
    since the sibling helper shares this function's (mangled) name and is
    shadowed at module level, the aliquot sum is computed by a private inner
    helper instead.

    :param a__: exclusive upper bound
    :return: sum of the amicable numbers below ``a__``
    """
    def _aliquot(n):
        # Sum of proper divisors via divisor pairs up to sqrt(n).
        total = 0
        for i in range(1, int(sqrt(n) + 1)):
            if n % i == 0 and i != sqrt(n):
                total += i + n // i
            elif i == sqrt(n):
                total += i
        return total - n

    return sum(
        i
        for i in range(1, a__)
        if _aliquot(_aliquot(i)) == i and _aliquot(i) != i)
if __name__ == "__main__":
    # NOTE(review): the original called the undefined name ``solution``; the
    # amicable-sum function defined directly above is bound to
    # ``lowerCAmelCase__``.
    print(lowerCAmelCase__(int(str(input()).strip())))
| 704 | from datetime import datetime
import requests
# NOTE(review): the original imported from ``bsa`` — no such package exists;
# BeautifulSoup lives in ``bs4``.
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # NOTE(review): the original assigned every value to the same name
    # ``lowerCamelCase__`` and then read the undefined names ``url``,
    # ``image_url``, ``image_data`` and ``file_name``; variable names are
    # restored from those reads.
    url = input('''Enter image url: ''').strip()
    print(F"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F"Done. Image saved to disk as {file_name}.")
| 82 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__A = None
__A = None
    @property
    def __UpperCAmelCase ( self : Any) -> Optional[int]:
        """Kwargs dict used to construct the feature extractor under test.

        NOTE(review): sibling tests read this as ``self.feat_extract_dict`` —
        the property's original name was presumably ``feat_extract_dict`` and
        was mangled; as written the attribute the tests use does not exist.
        """
        return self.feat_extract_tester.prepare_feat_extract_dict()
def __UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(__snake_case , "feature_size"))
self.assertTrue(hasattr(__snake_case , "sampling_rate"))
self.assertTrue(hasattr(__snake_case , "padding_value"))
def __UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(__snake_case) == len(__snake_case) for x, y in zip(__snake_case , processed_features[input_name])))
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="np")
_UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
_UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__snake_case)
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
_UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape) < 3:
_UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple=False) -> Any:
        """Shared padding checks (presumably the original ``_check_padding``,
        which siblings below call by that name; the flag selects list vs
        numpy inputs).

        NOTE(review): identifiers are mangled throughout — inner helpers have
        duplicate ``lowercase_`` parameters (SyntaxError) and the body reads
        many undefined names (``feat_extract``, ``input_a`` …); left
        byte-identical, documented only.
        """
        def _inputs_have_equal_length(lowercase_ : int):
            # True iff every slice has the same length as the first one.
            _UpperCamelCase = len(input[0])
            for input_slice in input[1:]:
                if len(__snake_case) != length:
                    return False
            return True
        def _inputs_are_equal(lowercase_ : Tuple , lowercase_ : Tuple):
            # Element-wise near-equality of two nested inputs (atol=1e-3).
            if len(__snake_case) != len(__snake_case):
                return False
            for input_slice_a, input_slice_a in zip(__snake_case , __snake_case):
                if not np.allclose(np.asarray(__snake_case) , np.asarray(__snake_case) , atol=1e-3):
                    return False
            return True
        _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
        _UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case)
        _UpperCamelCase = feat_extract.model_input_names[0]
        _UpperCamelCase = BatchFeature({input_name: speech_inputs})
        _UpperCamelCase = self.feat_extract_tester.seq_length_diff
        _UpperCamelCase = self.feat_extract_tester.max_seq_length + pad_diff
        _UpperCamelCase = self.feat_extract_tester.min_seq_length
        _UpperCamelCase = self.feat_extract_tester.batch_size
        _UpperCamelCase = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        _UpperCamelCase = feat_extract.pad(__snake_case , padding=__snake_case)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(__snake_case , padding="longest")
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(__snake_case , padding="max_length" , max_length=len(speech_inputs[-1]))
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="np")
        _UpperCamelCase = input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(__snake_case):
            feat_extract.pad(__snake_case , padding="max_length")[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=__snake_case , return_tensors="np")
        _UpperCamelCase = input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(__snake_case))
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
        self.assertTrue(len(input_a[0]) == pad_min_length)
        self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        _UpperCamelCase = feat_extract.pad(__snake_case , pad_to_multiple_of=10)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , pad_to_multiple_of=10)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , pad_to_multiple_of=10 , max_length=__snake_case)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , pad_to_multiple_of=10 , max_length=__snake_case , return_tensors="np" , )
        _UpperCamelCase = input_a[input_name]
        self.assertTrue(all(len(__snake_case) % 10 == 0 for x in input_a))
        self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
        _UpperCamelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(__snake_case) == expected_mult_pad_length for x in input_a))
        self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size)
        # Check padding value is correct
        _UpperCamelCase = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)
    def __UpperCAmelCase ( self : Dict , lowercase_ : Union[str, Any]=False) -> Dict:
        """Shared truncation checks (presumably the original
        ``_check_truncation``, which siblings below call by that name; the
        flag selects list vs numpy inputs).

        NOTE(review): identifiers are mangled throughout — inner helpers have
        duplicate ``lowercase_`` parameters (SyntaxError) and the body reads
        many undefined names; left byte-identical, documented only.
        """
        def _inputs_have_equal_length(lowercase_ : List[str]):
            # True iff every slice has the same length as the first one.
            _UpperCamelCase = len(input[0])
            for input_slice in input[1:]:
                if len(__snake_case) != length:
                    return False
            return True
        def _inputs_are_equal(lowercase_ : Dict , lowercase_ : List[str]):
            # Element-wise near-equality of two nested inputs (atol=1e-3).
            if len(__snake_case) != len(__snake_case):
                return False
            for input_slice_a, input_slice_a in zip(__snake_case , __snake_case):
                if not np.allclose(np.asarray(__snake_case) , np.asarray(__snake_case) , atol=1e-3):
                    return False
            return True
        _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
        _UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=__snake_case)
        _UpperCamelCase = feat_extract.model_input_names[0]
        _UpperCamelCase = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=__snake_case)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(__snake_case , padding="max_length" , max_length=len(speech_inputs[0]))
        _UpperCamelCase = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertFalse(_inputs_have_equal_length(__snake_case))
        # truncate to smallest with np
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=__snake_case , )
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
        _UpperCamelCase = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(__snake_case))
        # truncate to middle
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=__snake_case , return_tensors="np" , )
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=__snake_case)
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
        _UpperCamelCase = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertTrue(_inputs_are_equal(__snake_case , __snake_case))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(__snake_case))
        self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__snake_case):
            feat_extract.pad(__snake_case , truncation=__snake_case)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__snake_case):
            feat_extract.pad(__snake_case , padding="longest" , truncation=__snake_case)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(__snake_case):
            feat_extract.pad(__snake_case , padding="longest" , truncation=__snake_case)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(__snake_case):
            feat_extract.pad(__snake_case , padding="max_length" , truncation=__snake_case)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        _UpperCamelCase = 12
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
        _UpperCamelCase = input_a[input_name]
        _UpperCamelCase = feat_extract.pad(
            __snake_case , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=__snake_case , )
        _UpperCamelCase = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        _UpperCamelCase = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            _UpperCamelCase = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(__snake_case))
        self.assertFalse(_inputs_have_equal_length(__snake_case))
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
self._check_padding(numpify=__snake_case)
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
self._check_padding(numpify=__snake_case)
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self._check_truncation(numpify=__snake_case)
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=__snake_case)
@require_torch
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs})
_UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="np")[input_name]
_UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict)
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs})
_UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="np")[input_name]
_UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.feat_extract_dict
_UpperCamelCase = True
_UpperCamelCase = self.feature_extraction_class(**__snake_case)
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase = [len(__snake_case) for x in speech_inputs]
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs})
_UpperCamelCase = feat_extract.pad(__snake_case , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , __snake_case)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , __snake_case)
def __UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCamelCase = self.feat_extract_dict
_UpperCamelCase = True
_UpperCamelCase = self.feature_extraction_class(**__snake_case)
_UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCamelCase = [len(__snake_case) for x in speech_inputs]
_UpperCamelCase = feat_extract.model_input_names[0]
_UpperCamelCase = BatchFeature({input_name: speech_inputs})
_UpperCamelCase = min(__snake_case)
_UpperCamelCase = feat_extract.pad(
__snake_case , padding="max_length" , max_length=__snake_case , truncation=__snake_case , return_tensors="np")
self.assertIn("attention_mask" , __snake_case)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 705 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''dpr'''
def __init__( self : Optional[Any] , lowercase_ : int=30522 , lowercase_ : str=768 , lowercase_ : List[Any]=12 , lowercase_ : Dict=12 , lowercase_ : str=3072 , lowercase_ : Any="gelu" , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : str=512 , lowercase_ : str=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Dict=1e-1_2 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int = 0 , **lowercase_ : int , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_)
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = projection_dim
_UpperCamelCase = position_embedding_type
| 82 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
__A = "encodec"
def __init__( self : List[str] , lowercase_ : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase_ : Tuple=24000 , lowercase_ : List[Any]=1 , lowercase_ : Union[str, Any]=False , lowercase_ : str=None , lowercase_ : Dict=None , lowercase_ : str=128 , lowercase_ : Any=32 , lowercase_ : Any=1 , lowercase_ : List[Any]=[8, 5, 4, 2] , lowercase_ : Union[str, Any]="weight_norm" , lowercase_ : str=7 , lowercase_ : Optional[int]=7 , lowercase_ : Any=3 , lowercase_ : Tuple=2 , lowercase_ : Optional[Any]=True , lowercase_ : List[str]="reflect" , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=2 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Optional[Any]=1024 , lowercase_ : Optional[Any]=None , lowercase_ : str=True , **lowercase_ : str , ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = target_bandwidths
_UpperCamelCase = sampling_rate
_UpperCamelCase = audio_channels
_UpperCamelCase = normalize
_UpperCamelCase = chunk_length_s
_UpperCamelCase = overlap
_UpperCamelCase = hidden_size
_UpperCamelCase = num_filters
_UpperCamelCase = num_residual_layers
_UpperCamelCase = upsampling_ratios
_UpperCamelCase = norm_type
_UpperCamelCase = kernel_size
_UpperCamelCase = last_kernel_size
_UpperCamelCase = residual_kernel_size
_UpperCamelCase = dilation_growth_rate
_UpperCamelCase = use_causal_conv
_UpperCamelCase = pad_mode
_UpperCamelCase = compress
_UpperCamelCase = num_lstm_layers
_UpperCamelCase = trim_right_ratio
_UpperCamelCase = codebook_size
_UpperCamelCase = codebook_dim if codebook_dim is not None else hidden_size
_UpperCamelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}')
super().__init__(**lowercase_)
@property
def __UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def __UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCamelCase = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 706 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['''MobileNetV2FeatureExtractor''']
lowerCamelCase__ = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = XLNetConfig.from_json_file(a__ )
_UpperCamelCase = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
_UpperCamelCase = finetuning_task
_UpperCamelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_UpperCamelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_UpperCamelCase = finetuning_task
_UpperCamelCase = XLNetForQuestionAnswering(a__ )
else:
_UpperCamelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_UpperCamelCase = os.path.join(a__ , a__ )
_UpperCamelCase = os.path.join(a__ , a__ )
print(f'Save PyTorch model to {os.path.abspath(a__ )}' )
torch.save(model.state_dict() , a__ )
print(f'Save configuration file to {os.path.abspath(a__ )}' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowerCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCamelCase = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
_UpperCamelCase = AutoTokenizer.from_pretrained("google/mt5-small")
_UpperCamelCase = tokenizer("Hello there" , return_tensors="np").input_ids
_UpperCamelCase = tokenizer("Hi I am" , return_tensors="np").input_ids
_UpperCamelCase = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id)
_UpperCamelCase = model(_snake_case , decoder_input_ids=_snake_case).logits
_UpperCamelCase = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1])).mean()
_UpperCamelCase = -(labels.shape[-1] * loss.item())
_UpperCamelCase = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 708 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Tuple) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = model
_UpperCamelCase = 2
_UpperCamelCase = nn.Linear(self.model.config.hidden_size , self.num_labels)
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = LongformerModel.from_pretrained(a__ )
_UpperCamelCase = LightningModel(a__ )
_UpperCamelCase = torch.load(a__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
_UpperCamelCase = LongformerForQuestionAnswering.from_pretrained(a__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 82 | 0 |
from __future__ import annotations
def lowerCAmelCase__ ( a__ , a__ ) ->Optional[Any]:
'''simple docstring'''
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(a__ ):
print(f'{i}\t\t{d}' )
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->List[Any]:
'''simple docstring'''
for j in range(a__ ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->str:
'''simple docstring'''
_UpperCamelCase = [float("inf" )] * vertex_count
_UpperCamelCase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(a__ ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (graph[j][k] for k in ["src", "dst", "weight"])
if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
_UpperCamelCase = distance[u] + w
_UpperCamelCase = check_negative_cycle(a__ , a__ , a__ )
if negative_cycle_exists:
raise Exception("Negative cycle found" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCamelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCamelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCamelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCamelCase__ = {"""src""": src, """dst""": dest, """weight""": weight}
lowerCamelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCamelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 709 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Union[str, Any]) -> None:
"""simple docstring"""
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 82 | 0 |
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : List[str] = 0) -> None:
"""simple docstring"""
_UpperCamelCase = row, column
_UpperCamelCase = [[default_value for c in range(_a)] for r in range(_a)]
def __str__( self : str) -> str:
"""simple docstring"""
_UpperCamelCase = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
_UpperCamelCase = 0
for row_vector in self.array:
for obj in row_vector:
_UpperCamelCase = max(_a , len(str(_a)))
_UpperCamelCase = f'%{max_element_length}s'
# Make string and return
def single_line(lowercase_ : List[Any]) -> str:
nonlocal string_format_identifier
_UpperCamelCase = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
line += "]"
return line
s += "\n".join(single_line(_a) for row_vector in self.array)
return s
def __repr__( self : Tuple) -> str:
"""simple docstring"""
return str(self)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : List[Any]) -> bool:
"""simple docstring"""
if not (isinstance(_a , (list, tuple)) and len(_a) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Tuple , lowercase_ : str) -> Any:
"""simple docstring"""
assert self.validate_indicies(_a)
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : int) -> None:
"""simple docstring"""
assert self.validate_indicies(_a)
_UpperCamelCase = value
def __add__( self : Union[str, Any] , lowercase_ : Dict) -> Matrix:
"""simple docstring"""
assert isinstance(_a , _a)
assert self.row == another.row and self.column == another.column
# Add
_UpperCamelCase = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
_UpperCamelCase = self[r, c] + another[r, c]
return result
def __neg__( self : Any) -> Matrix:
"""simple docstring"""
_UpperCamelCase = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
_UpperCamelCase = -self[r, c]
return result
def __sub__( self : List[str] , lowercase_ : str) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : int , lowercase_ : Union[str, Any]) -> Matrix:
"""simple docstring"""
if isinstance(_a , (int, float)): # Scalar multiplication
_UpperCamelCase = Matrix(self.row , self.column)
for r in range(self.row):
for c in range(self.column):
_UpperCamelCase = self[r, c] * another
return result
elif isinstance(_a , _a): # Matrix multiplication
assert self.column == another.row
_UpperCamelCase = Matrix(self.row , another.column)
for r in range(self.row):
for c in range(another.column):
for i in range(self.column):
result[r, c] += self[r, i] * another[i, c]
return result
else:
_UpperCamelCase = f'Unsupported type given for another ({type(_a)})'
raise TypeError(_a)
def __UpperCAmelCase ( self : Union[str, Any]) -> Matrix:
"""simple docstring"""
_UpperCamelCase = Matrix(self.column , self.row)
for r in range(self.row):
for c in range(self.column):
_UpperCamelCase = self[r, c]
return result
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : List[Any]) -> Any:
"""simple docstring"""
assert isinstance(_a , _a) and isinstance(_a , _a)
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
_UpperCamelCase = v.transpose()
_UpperCamelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCAmelCase__ ( ) ->str:
'''simple docstring'''
_UpperCamelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
_UpperCamelCase = 1
print(f'a^(-1) is {ainv}' )
# u, v
_UpperCamelCase = Matrix(3 , 1 , 0 )
_UpperCamelCase = 1, 2, -3
_UpperCamelCase = Matrix(3 , 1 , 0 )
_UpperCamelCase = 4, -2, 5
print(f'u is {u}' )
print(f'v is {v}' )
print(f'uv^T is {u * v.transpose()}' )
# Sherman Morrison
print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(snake_case_ , snake_case_ )}' )
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 710 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''generated'''
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
if truncation is not None:
_UpperCamelCase = truncation
_UpperCamelCase = generate_kwargs
_UpperCamelCase = {}
if return_tensors is not None and return_type is None:
_UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCamelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
if len(lowercase_) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_UpperCamelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
"""simple docstring"""
return True
def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
_UpperCamelCase = ([prefix + arg for arg in args[0]],)
_UpperCamelCase = True
elif isinstance(args[0] , lowercase_):
_UpperCamelCase = (prefix + args[0],)
_UpperCamelCase = False
else:
raise ValueError(
f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
_UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
if (
isinstance(args[0] , lowercase_)
and all(isinstance(lowercase_ , lowercase_) for el in args[0])
and all(len(lowercase_) == 1 for res in result)
):
return [res[0] for res in result]
return result
def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
return inputs
def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
"""simple docstring"""
if self.framework == "pt":
_UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
elif self.framework == "tf":
_UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
_UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
_UpperCamelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
_UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
_UpperCamelCase = {
f'{self.return_name}_text': self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_)
return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''summary'''
def __call__( self : Optional[Any] , *lowercase_ : int , **lowercase_ : Dict) -> Optional[int]:
"""simple docstring"""
return super().__call__(*lowercase_ , **lowercase_)
def __UpperCAmelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: delegates tokenization of (src_lang, tgt_lang)
    pairs to the tokenizer when it supports translation-aware inputs.

    NOTE(review): the original block gave every method the same mangled name
    and duplicated parameter names (a SyntaxError).  Method and parameter
    names are restored here to the overrides a pipeline base class invokes
    (``check_inputs``, ``preprocess``, ``_sanitize_parameters``, ``__call__``)
    so the bodies' identifiers resolve — confirm against the upstream module.
    """

    # Task identifier; also parsed for "translation_XX_to_YY" style task names.
    __A = '''translation'''

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn when the input is close to ``max_length``; always returns True."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """Tokenize inputs, preferring the tokenizer's translation builder if present."""
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            # Fallback for tokenizers without language-pair support.
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """Split kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to, YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        """Translate the given text(s); see the base pipeline for keyword args."""
        return super().__call__(*args, **kwargs)
| 82 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any]=7 , lowercase_ : List[str]=3 , lowercase_ : Tuple=18 , lowercase_ : Union[str, Any]=30 , lowercase_ : int=400 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=None , lowercase_ : List[str]=True , lowercase_ : str=None , lowercase_ : List[Any]=True , lowercase_ : List[str]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , lowercase_ : Tuple=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , lowercase_ : Dict=True , ) -> int:
"""simple docstring"""
_UpperCamelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = num_channels
_UpperCamelCase = image_size
_UpperCamelCase = min_resolution
_UpperCamelCase = max_resolution
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean
_UpperCamelCase = image_std
_UpperCamelCase = do_convert_rgb
def __UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCAmelCase ( self : List[str] , lowercase_ : Union[str, Any]=False , lowercase_ : Optional[int]=False , lowercase_ : Tuple=False) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_UpperCamelCase = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta))
else:
_UpperCamelCase = []
for i in range(self.batch_size):
_UpperCamelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_UpperCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs]
if torchify:
_UpperCamelCase = [torch.from_numpy(UpperCamelCase__) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
# NOTE(review): ``SCREAMING_SNAKE_CASE_``, ``ChineseCLIPImageProcessingTester``
# and the ``UpperCamelCase__`` names used below are not defined anywhere in
# this file (machine-mangled identifiers) — this class cannot run as written;
# confirm the intended names against the upstream test module.
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
    '''Tests ChineseCLIPImageProcessor: config attributes and the shape of
    encoded pixel_values for PIL, numpy and torch inputs (3-channel case).'''
    __A = ChineseCLIPImageProcessor if is_vision_available() else None
    def __UpperCAmelCase ( self : str) -> str:
        """Set up the helper that generates configs and synthetic image batches."""
        _UpperCamelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase__)
    @property
    def __UpperCAmelCase ( self : List[str]) -> int:
        """Kwargs for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : int) -> str:
        """The processor exposes all expected configuration attributes."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
        self.assertTrue(hasattr(UpperCamelCase__ , "size"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
        self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
        self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
        self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
    def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
        """from_dict honors defaults and integer size/crop_size overrides."""
        _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
        # An int size becomes a shortest-edge spec; an int crop becomes square.
        _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
    def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
        """Intentionally skipped in this configuration."""
        pass
    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """PIL inputs: single image and batch encode to crop-sized tensors."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def __UpperCAmelCase ( self : Tuple) -> Dict:
        """Numpy inputs: single image and batch encode to crop-sized tensors."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        _UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
    def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
        """Torch inputs: single image and batch encode to crop-sized tensors."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
# NOTE(review): ``SCREAMING_SNAKE_CASE_``, ``ChineseCLIPImageProcessingTester``
# and ``UpperCamelCase__`` are not defined in this file (mangled identifiers);
# confirm the intended names against the upstream test module.
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
    '''Tests ChineseCLIPImageProcessor with 4-channel input images: the
    processor is expected to emit 3-channel pixel_values.'''
    __A = ChineseCLIPImageProcessor if is_vision_available() else None
    def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
        """Set up a 4-channel tester; outputs are expected to have 3 channels."""
        _UpperCamelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase__)
        _UpperCamelCase = 3
    @property
    def __UpperCAmelCase ( self : Tuple) -> Any:
        """Kwargs for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """The processor exposes all expected configuration attributes."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
        self.assertTrue(hasattr(UpperCamelCase__ , "size"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
        self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
        self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
        self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
        self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
    def __UpperCAmelCase ( self : Any) -> List[Any]:
        """Intentionally skipped in this configuration."""
        pass
    def __UpperCAmelCase ( self : Any) -> List[Any]:
        """PIL RGBA inputs are converted and encoded with 3 output channels."""
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _UpperCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        _UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 711 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# NOTE(review): every constant below is bound to the same mangled name
# ``lowerCamelCase__`` — each assignment shadows the previous one, so only the
# final value survives at runtime.  The class further down references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / SPIECE_UNDERLINE, which are
# never defined under those names here; confirm against the upstream module.
lowerCamelCase__ = logging.get_logger(__name__)
# Expected on-disk file name for the SentencePiece vocabulary.
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
# Download URLs for the pretrained T5 SentencePiece models.
lowerCamelCase__ = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}
# SentencePiece's word-initial marker character.
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''T5 tokenizer backed by SentencePiece, with ``<extra_id_N>`` sentinel
    tokens appended after the SentencePiece vocabulary.

    NOTE(review): identifiers are machine-mangled — the four ``__A``
    assignments shadow each other, several defs repeat the parameter name
    ``lowercase_`` (a SyntaxError), and assignment targets like
    ``_UpperCamelCase`` do not match the names later code reads.  This block
    is documented as-is; it cannot run without restoring the original names.
    '''
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register extra_id sentinel tokens."""
        # Synthesize <extra_id_N> tokens when none were supplied explicitly.
        if extra_ids > 0 and additional_special_tokens is None:
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Back-compat: resolve max_model_length for legacy T5 checkpoints, warning once."""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Vocabulary size: SentencePiece pieces plus the extra sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token->id mapping, including added tokens."""
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Mask marking special-token positions (1) in sequences built by this tokenizer."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """List the registered <extra_id_N> sentinel token strings."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """List the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append EOS unless the sequence already ends with it (warns if it does)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Token type ids: T5 uses a single segment, so all zeros."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build model input: seq_a</s> or seq_a</s>seq_b</s>."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor before pickling."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore state and reload the SentencePiece model from vocab_file."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Tokenize text; non-legacy mode prefixes the SentencePiece underline."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
            return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Encode with SentencePiece, stripping the artificial leading underline."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Token -> id; sentinel tokens map to the top of the vocabulary."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Id -> token; ids past the SentencePiece range become sentinel tokens."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Join tokens to text, decoding runs between special tokens separately."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model into save_directory; return the path."""
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        # Copy the original file when it exists; otherwise serialize from memory.
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
| 82 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
# NOTE(review): identifiers are machine-mangled — ``lowerCAmelCase__`` (base
# class), the ``_lowerCamelCase`` call arguments and the ``_UpperCamelCase``
# assignment targets are undefined under these names; all six ``__A``
# assignments shadow each other.  This class cannot run as written; confirm
# the intended names against the upstream MobileBERT tokenizer test module.
class _UpperCAmelCase ( lowerCAmelCase__, unittest.TestCase ):
    '''Tests MobileBertTokenizer / MobileBertTokenizerFast: wordpiece and
    basic tokenization, text cleaning, offsets, and sequence building.'''
    __A = MobileBertTokenizer
    __A = MobileBertTokenizerFast
    __A = True
    __A = True
    __A = filter_non_english
    __A = "google/mobilebert-uncased"
    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
        """Write a tiny wordpiece vocab file and point the tokenizer list at it."""
        super().setUp()
        _UpperCamelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        _UpperCamelCase = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any]) -> int:
        """Provide an (input, expected output) text pair for round-trip tests."""
        _UpperCamelCase = "UNwant\u00E9d,running"
        _UpperCamelCase = "unwanted, running"
        return input_text, output_text
    def __UpperCAmelCase ( self : Tuple) -> Optional[Any]:
        """Tokenize with the slow tokenizer and check tokens and ids."""
        _UpperCamelCase = self.tokenizer_class(self.vocab_file)
        _UpperCamelCase = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(_lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase) , [9, 6, 7, 12, 10, 11])
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
        """Slow and fast tokenizers agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = "UNwant\u00E9d,running"
        _UpperCamelCase = tokenizer.tokenize(_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.tokenize(_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
        _UpperCamelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = tokenizer.encode(_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.encode(_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
        # With lower casing
        _UpperCamelCase = self.get_tokenizer(do_lower_case=_lowerCamelCase)
        _UpperCamelCase = self.get_rust_tokenizer(do_lower_case=_lowerCamelCase)
        _UpperCamelCase = "UNwant\u00E9d,running"
        _UpperCamelCase = tokenizer.tokenize(_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.tokenize(_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
        _UpperCamelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = tokenizer.encode(_lowerCamelCase)
        _UpperCamelCase = rust_tokenizer.encode(_lowerCamelCase)
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
    def __UpperCAmelCase ( self : Optional[int]) -> Tuple:
        """BasicTokenizer splits CJK characters into individual tokens."""
        _UpperCamelCase = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
    def __UpperCAmelCase ( self : Any) -> int:
        """Lower-casing basic tokenization (accents folded by default)."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Lower-casing with accents preserved (strip_accents disabled)."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
    def __UpperCAmelCase ( self : int) -> Tuple:
        """Lower-casing with accents stripped explicitly."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def __UpperCAmelCase ( self : str) -> str:
        """Default lower-casing behavior strips accents."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def __UpperCAmelCase ( self : Tuple) -> Optional[Any]:
        """Case-preserving basic tokenization."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
        """Case-preserving tokenization with accents preserved."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
        """Case-preserving tokenization with accents stripped."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def __UpperCAmelCase ( self : str) -> Dict:
        """never_split keeps listed tokens (e.g. [UNK]) intact."""
        _UpperCamelCase = BasicTokenizer(do_lower_case=_lowerCamelCase , never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def __UpperCAmelCase ( self : int) -> int:
        """WordpieceTokenizer splits into subwords and falls back to [UNK]."""
        _UpperCamelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        _UpperCamelCase = {}
        for i, token in enumerate(_lowerCamelCase):
            _UpperCamelCase = i
        _UpperCamelCase = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize("") , [])
        self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
    def __UpperCAmelCase ( self : Dict) -> List[str]:
        """_is_whitespace classifies whitespace characters (incl. NBSP)."""
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def __UpperCAmelCase ( self : List[str]) -> Dict:
        """_is_control classifies control characters only."""
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def __UpperCAmelCase ( self : int) -> Union[str, Any]:
        """_is_punctuation classifies punctuation characters only."""
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def __UpperCAmelCase ( self : Dict) -> Tuple:
        """Soft-hyphen-only input tokenizes to nothing (tokenizers#340)."""
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(_lowerCamelCase) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(_lowerCamelCase) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
    @slow
    def __UpperCAmelCase ( self : int) -> List[str]:
        """build_inputs_with_special_tokens adds [CLS]/[SEP] framing."""
        _UpperCamelCase = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")
        _UpperCamelCase = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCamelCase)
        _UpperCamelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCamelCase)
        _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase)
        _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def __UpperCAmelCase ( self : List[str]) -> List[Any]:
        """Fast tokenizer offset mapping handles accents/special chars correctly."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
                _UpperCamelCase = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                _UpperCamelCase = tokenizer_r.encode_plus(
                    _lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
                _UpperCamelCase = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , "do_lower_case") else False
                _UpperCamelCase = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
    def __UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
        """tokenize_chinese_chars toggles '##' prefixing of CJK characters."""
        _UpperCamelCase = ["的", "人", "有"]
        _UpperCamelCase = "".join(_lowerCamelCase)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                _UpperCamelCase = True
                _UpperCamelCase = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
                _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
                _UpperCamelCase = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
                _UpperCamelCase = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
                _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase)
                _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
                _UpperCamelCase = False
                _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
                _UpperCamelCase = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase)
                _UpperCamelCase = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
                _UpperCamelCase = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase)
                _UpperCamelCase = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase)
                _UpperCamelCase = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase)
                # it is expected that only the first Chinese character is not preceded by "##".
                _UpperCamelCase = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase)
                ]
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
                self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
| 712 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->tuple:
    """Build a ``getitem`` operation tuple for the table-driven hash-map tests.

    Returns ``(getitem, a__)``. Fixes the original body, which returned the
    unbound name ``k`` and carried a wrong ``->str`` annotation.
    """
    return getitem, a__
def lowerCAmelCase__ ( k , v ) ->tuple:
    """Build a ``setitem`` operation tuple ``(setitem, key, value)``.

    Fixes the original signature, which used the duplicate parameter name
    ``a__`` twice (a SyntaxError) and an un-imported ``Tuple`` annotation.
    """
    return setitem, k, v
def lowerCAmelCase__ ( a__ ) ->tuple:
    """Build a ``delitem`` operation tuple ``(delitem, key)``.

    Fixes the original body, which returned the unbound name ``k`` and
    carried a wrong ``->int`` annotation.
    """
    return delitem, a__
def lowerCAmelCase__ ( fun , k , *args ) ->tuple:
    """Apply ``fun(k, *args)`` and capture the outcome.

    Returns ``(result, None)`` on success or ``(None, exception)`` on any
    failure, so the caller can compare two implementations' behavior.
    Fixes the original signature, which repeated ``a__`` (a SyntaxError).
    """
    try:
        return fun(k , *args ), None
    except Exception as e:  # intentional broad catch: mirror *any* failure
        return None, e
# Each scenario below is a sequence of (operator, *args) tuples consumed by
# the parametrized test further down.
# NOTE(review): every scenario rebinds the same obfuscated name
# ``lowerCamelCase__`` — only the last binding survives, and the parametrize
# list references the original scenario names, which are no longer defined.
# Two distinct keys, inserted once each.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
# Same key set twice with different values (overwrite).
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
# Insert, delete everything, then re-insert and delete again.
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
# Access/delete keys that are absent at the time of the operation.
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
# Enough inserts to force the table to grow.
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
# Grow, then shrink by deleting everything, then insert once more.
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    """Replay each scripted operation against both ``HashMap`` and ``dict``
    and assert the two containers agree at every step (result, repr, keys,
    length, items).

    NOTE(review): obfuscation rebound the per-iteration arguments to ``a__``
    and the results to the throwaway ``_UpperCamelCase``, so ``my_res``,
    ``py_res``, ``my`` and ``py`` below are unbound names — restore the
    original variables (``my``/``py`` maps, ``fun``/``args`` call) to run.
    """
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
    """Assert that ``dict``'s public API is a strict superset of ``HashMap``'s.

    Fixes the original, whose ``is_public`` read the unbound name ``name``
    and was called with the undefined ``a__`` instead of the loop variable.
    """
    def is_public(name: str) -> bool:
        # Public attributes are those without a leading underscore.
        return not name.startswith("_" )

    _UpperCamelCase = {name for name in dir({} ) if is_public(name )}
    _UpperCamelCase = {name for name in dir(HashMap() ) if is_public(name )}
    assert _UpperCamelCase >= _UpperCamelCase  # NOTE(review): both sets share the obfuscated name; use distinct names (dict_public_names > hash_public_names) as upstream does.
| 82 | 0 |
def lowerCAmelCase__ ( a__ = 1_000 ) ->int:
    """Return the sum of the decimal digits of ``2**a__`` (Project Euler #16).

    Fixes the original body, which read the unbound name ``power`` and
    assigned the update tuple to a single variable, so ``n`` never shrank.
    """
    n = 2 ** a__
    total = 0
    while n:
        # Peel off the last decimal digit each iteration.
        total += n % 10
        n //= 10
    return total
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this file — the digit-sum
    # function above was renamed ``lowerCAmelCase__`` by the obfuscation;
    # confirm the intended callee before running.
    print(solution(int(str(input()).strip())))
| 713 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make framework ops deterministic so the pipeline tests are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Fast, dummy-sized tests for ``KandinskyVaaControlnetImgaImgPipeline``.

    NOTE(review): obfuscation collapsed every property/method name to
    ``__UpperCAmelCase`` (later defs shadow earlier ones) and references such
    as ``self.time_input_dim`` / ``self.dummy_unet`` / ``self.dummy_movq`` no
    longer resolve — restore the upstream names before running.
    """
    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False
    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Dummy dimension constant (original property name lost to obfuscation)."""
        return 32
    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Dummy dimension constant (original property name lost to obfuscation)."""
        return 32
    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Alias of ``self.time_input_dim`` — TODO confirm original property name."""
        return self.time_input_dim
    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Four times ``self.time_input_dim`` — TODO confirm original property name."""
        return self.time_input_dim * 4
    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Dummy sequence-length/constant 100 (original property name lost)."""
        return 100
    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Build a tiny seeded ``UNetaDConditionModel`` for fast tests."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model
    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Keyword arguments for the dummy ``VQModel`` (movq) below."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Build a tiny seeded ``VQModel`` from ``dummy_movq_kwargs``."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model
    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble the pipeline components dict (unet, DDIM scheduler, movq)."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Build deterministic dummy call inputs (embeds, init image, hint, generator)."""
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def __UpperCAmelCase ( self : Any) -> str:
        """Run the pipeline on CPU and compare a 3x3 output slice to a golden value."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test: Kandinsky 2.2 prior + controlnet img2img
    against a stored reference image.

    NOTE(review): both methods were renamed ``__UpperCAmelCase`` by the
    obfuscation, so unittest will neither run the test nor call the teardown
    automatically — restore the original names.
    """
    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Free CUDA memory between tests (calls ``super().tearDown()``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """End-to-end: encode prompt with the prior, run controlnet img2img,
        compare the 512x512 output against the stored numpy reference."""
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 0 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# (TF substring, HF substring) pairs applied in order by
# ``rename_state_dict_key`` to map Pegasus TF variable names onto the
# (BART-identical) torch state dict. The obfuscated ``lowerCamelCase__``
# binding left ``PATTERNS`` undefined; both names are provided here.
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]
# Preserve the original (obfuscated) binding for any external references.
lowerCamelCase__ = PATTERNS
def lowerCAmelCase__ ( a__ ) ->str:
    """Translate a TF Pegasus variable name into its HF/torch state-dict key.

    Applies each ``PATTERNS`` substitution in order. Fixes the original
    body, which called ``k.replace`` on an unbound name and discarded the
    replacement result.
    """
    for pegasus_name, hf_name in PATTERNS:
        a__ = a__.replace(pegasus_name , hf_name )
    return a__
def lowerCAmelCase__ ( tf_weights , cfg_updates ) ->PegasusForConditionalGeneration:
    """Build a ``PegasusForConditionalGeneration`` and load converted TF weights.

    ``tf_weights`` maps TF variable names to numpy arrays; ``cfg_updates`` is
    merged over ``DEFAULTS`` before building the config. Raises ``ValueError``
    when a renamed TF key has no counterpart in the torch state dict.
    Fixes the original signature (duplicate ``a__`` parameters) and the
    obfuscated bindings that left ``new_k``/``mapping``/``cfg`` unbound.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
        # TF stores dense/projection kernels transposed relative to torch Linear.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def lowerCAmelCase__ ( a__="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
    """Load every non-optimizer variable from a TF checkpoint into a dict of
    numpy arrays keyed by variable name.

    Fixes the obfuscated body, which left ``init_vars``/``skip_key``/``array``
    unbound and never stored the loaded variables.
    """
    init_vars = tf.train.list_variables(a__ )
    tf_weights = {}
    # Optimizer slot variables and the step counter are not model weights.
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars , desc="converting tf checkpoint to dict" ):
        if any(pat in name for pat in ignore_name ):
            continue
        tf_weights[name] = tf.train.load_variable(a__ , name )
    return tf_weights
def lowerCAmelCase__ ( ckpt_path , save_dir ) ->None:
    """Convert the TF Pegasus checkpoint at ``ckpt_path`` into a HF tokenizer
    and model saved under ``save_dir``.

    Fixes the original signature (duplicate ``a__`` parameters, wrong
    ``->int`` annotation) and the obfuscated bindings that left ``dataset``,
    ``tok``, ``tf_weights`` and ``torch_model`` unbound.
    """
    # The dataset name (e.g. "aeslc") is the checkpoint's parent directory.
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        # presumably the "large" checkpoint embeds all task params — TODO confirm against upstream script
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    # Positional embeddings are recomputed at load time; drop them from the bin.
    sd.pop("model.decoder.embed_positions.weight" )
    sd.pop("model.encoder.embed_positions.weight" )
    torch.save(sd , Path(save_dir ) / "pytorch_model.bin" )
if __name__ == "__main__":
    # NOTE(review): obfuscation rebinds ``lowerCamelCase__`` for parser, args
    # and dataset, so ``parser``/``args``/``dataset`` below are unbound, and
    # ``convert_pegasus_ckpt_to_pytorch`` is not defined under that name in
    # this file — restore the original variable/function names to run.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCamelCase__ = parser.parse_args()
    if args.save_dir is None:
        lowerCamelCase__ = Path(args.tf_ckpt_path).parent.name
        lowerCamelCase__ = os.path.join('''pegasus''', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 714 | def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
assert (
isinstance(a__ , a__ ) and number_of_steps > 0
), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
_UpperCamelCase , _UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
_UpperCamelCase , _UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCAmelCase__ ( func , a , precision = 10**-10 ) ->float:
    """Find a root of ``func`` — a string expression in ``x`` — via the
    Newton-Raphson method, starting from ``a``.

    ``diff(func)`` (sympy) supplies the derivative symbolically. Fixes the
    original signature, which repeated ``a__`` (a SyntaxError) and left the
    iterate unbound.

    SECURITY: ``eval`` runs the caller-supplied expression — trusted input only.
    """
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
    # NOTE(review): ``newton_raphson`` is not defined in this chunk — the
    # solver above was renamed ``lowerCAmelCase__``; confirm the intended
    # callee before running.
    # Find root of trigonometric function
    # Find value of pi
    print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find Square Root of 5
    print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 715 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Module-level logger (transformers' logging wrapper).
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zero-shot object-detection pipeline: scores free-text candidate labels
    against regions of an image and returns score/label/box dicts.

    NOTE(review): obfuscation replaced several call arguments with the
    generic ``lowercase_`` name (e.g. ``check_model_type``) and detached
    names such as ``image``/``candidate_labels``/``kwargs`` — verify each
    reference against the upstream transformers pipeline before running.
    """
    def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
        """Reject the TF framework, require the vision backend, check model type."""
        super().__init__(**lowercase_)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        self.check_model_type(lowercase_)
    def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
        """Detect objects matching the candidate labels in one image
        (or in a list of ``{"image": ..., "candidate_labels": ...}`` dicts)."""
        if "text_queries" in kwargs:
            _UpperCamelCase = kwargs.pop("text_queries")
        if isinstance(lowercase_ , (str, Image.Image)):
            _UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
        else:
            _UpperCamelCase = image
        _UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
        return results
    def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
        """Split call kwargs into (preprocess, forward, postprocess) params;
        only ``threshold`` and ``top_k`` are postprocess options."""
        _UpperCamelCase = {}
        if "threshold" in kwargs:
            _UpperCamelCase = kwargs["threshold"]
        if "top_k" in kwargs:
            _UpperCamelCase = kwargs["top_k"]
        return {}, {}, postprocess_params
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
        """Tokenize each candidate label with the image features; yields one
        model input per label, flagging the last with ``is_last``."""
        _UpperCamelCase = load_image(inputs["image"])
        _UpperCamelCase = inputs["candidate_labels"]
        if isinstance(lowercase_ , lowercase_):
            _UpperCamelCase = candidate_labels.split(",")
        _UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(lowercase_):
            _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
            yield {
                "is_last": i == len(lowercase_) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
        """Run the model on one label's inputs; pass bookkeeping keys through."""
        _UpperCamelCase = model_inputs.pop("target_size")
        _UpperCamelCase = model_inputs.pop("candidate_label")
        _UpperCamelCase = model_inputs.pop("is_last")
        _UpperCamelCase = self.model(**lowercase_)
        _UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
        """Convert raw detections to score/label/box dicts, sorted by score
        descending and truncated to ``top_k`` when given."""
        _UpperCamelCase = []
        for model_output in model_outputs:
            _UpperCamelCase = model_output["candidate_label"]
            _UpperCamelCase = BaseModelOutput(lowercase_)
            _UpperCamelCase = self.image_processor.post_process_object_detection(
                outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                _UpperCamelCase = outputs["scores"][index].item()
                _UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
                _UpperCamelCase = {"score": score, "label": label, "box": box}
                results.append(lowercase_)
        _UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
        if top_k:
            _UpperCamelCase = results[:top_k]
        return results
    def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
        """Convert a 4-element box tensor to an int dict (PyTorch only).

        NOTE(review): the 4-way unpack below binds every coordinate to
        ``_UpperCamelCase``, leaving ``xmin``/``ymin``/``xmax``/``ymax``
        unbound — restore the original names.
        """
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
        _UpperCamelCase = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger (transformers' logging wrapper).
lowerCamelCase__ = logging.get_logger(__name__)
# File names and download map expected by the tokenizer class below — the
# obfuscated rebinding of ``lowerCamelCase__`` left ``VOCAB_FILES_NAMES`` and
# ``PRETRAINED_VOCAB_FILES_MAP`` undefined, so the class body raised NameError.
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
        'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
        'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
        'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
        'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
        'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
        'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
    },
}
# Preserve the original final binding of the obfuscated name.
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
class _UpperCAmelCase ( __UpperCAmelCase ):
    """Fast (tokenizers-backed) BLOOM tokenizer.

    NOTE(review): obfuscation replaced the super()/helper call arguments
    with ``__SCREAMING_SNAKE_CASE`` throughout — each call below must be
    checked against the upstream ``BloomTokenizerFast`` before use.
    """
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = ['''input_ids''', '''attention_mask''']
    __A = None
    def __init__( self : int , lowercase_ : str=None , lowercase_ : List[Any]=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<unk>" , lowercase_ : List[str]="<s>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : Dict="<pad>" , lowercase_ : str=False , lowercase_ : Optional[int]=False , **lowercase_ : Union[str, Any] , ) -> Any:
        """Initialize the fast tokenizer and sync ``add_prefix_space`` into the
        backend pre-tokenizer's state if it differs."""
        super().__init__(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        _UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , __SCREAMING_SNAKE_CASE) != add_prefix_space:
            _UpperCamelCase = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop("type"))
            _UpperCamelCase = add_prefix_space
            _UpperCamelCase = pre_tok_class(**__SCREAMING_SNAKE_CASE)
        _UpperCamelCase = add_prefix_space
    def __UpperCAmelCase ( self : Dict , *lowercase_ : str , **lowercase_ : Tuple) -> Dict:
        """Batch encode; pretokenized input requires ``add_prefix_space=True``."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , __SCREAMING_SNAKE_CASE)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                " pretokenized inputs.")
        return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
    def __UpperCAmelCase ( self : str , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any]) -> int:
        """Single encode; same ``add_prefix_space`` constraint as batch encode."""
        _UpperCamelCase = kwargs.get("is_split_into_words" , __SCREAMING_SNAKE_CASE)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                " pretokenized inputs.")
        return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
    def __UpperCAmelCase ( self : Any , lowercase_ : str , lowercase_ : Optional[str] = None) -> Optional[int]:
        """Save the backend tokenizer model files; returns the file name tuple."""
        _UpperCamelCase = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
        return tuple(__SCREAMING_SNAKE_CASE)
    def __UpperCAmelCase ( self : Any , lowercase_ : "Conversation") -> int:
        """Encode a Conversation's turns (each followed by EOS), truncated to
        the last ``model_max_length`` ids."""
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE) + [self.eos_token_id])
        if len(__SCREAMING_SNAKE_CASE) > self.model_max_length:
            _UpperCamelCase = input_ids[-self.model_max_length :]
        return input_ids
| 716 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( lowerCAmelCase ):
    """Repository hygiene checks over ``./datasets``: every ``open()`` must
    pass an explicit encoding (or a binary mode), and no bare ``print()``
    calls are allowed.

    NOTE(review): all four methods share the obfuscated name
    ``__UpperCAmelCase`` (later defs shadow earlier ones), and the test
    bodies call ``_no_encoding_on_file_open``/``_no_print_statements``,
    which are not defined under those names here — restore the upstream
    method names to run.
    """
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> str:
        """Return a regex match if the file calls ``open()`` without an
        encoding or binary mode (negative-lookahead list of allowed modes)."""
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            _UpperCamelCase = input_file.read()
            _UpperCamelCase = regexp.search(lowercase_)
        return match
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : str) -> int:
        """Return the first real ``print(`` call in the file; the alternation
        skips prints inside comments, string literals and docstrings."""
        with open(lowercase_ , encoding="utf-8") as input_file:
            _UpperCamelCase = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            _UpperCamelCase = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            _UpperCamelCase = regexp.finditer(lowercase_)
            _UpperCamelCase = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def __UpperCAmelCase ( self : int) -> int:
        """Fail if any dataset script opens a file without utf-8 encoding."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(lowercase_)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
    def __UpperCAmelCase ( self : str) -> str:
        """Fail if any dataset script contains a bare print statement."""
        _UpperCamelCase = Path("./datasets")
        _UpperCamelCase = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(lowercase_)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
| 82 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _UpperCAmelCase ( UpperCamelCase_ ):
    """``cached_property``-style descriptor: computes ``fget`` once per
    instance and caches the result under ``'__cached_' + fget.__name__``.

    NOTE(review): the body references ``obj`` and ``__a``, which are not the
    parameter names of this ``__get__`` (obfuscation artifact) — restore the
    original argument names before use.
    """
    def __get__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Tuple=None) -> int:
        """Return the cached value, computing and storing it on first access;
        class-level access (``obj is None``) returns the descriptor itself."""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        _UpperCamelCase = '__cached_' + self.fget.__name__
        _UpperCamelCase = getattr(__a , __a , __a)
        if cached is None:
            _UpperCamelCase = self.fget(__a)
            setattr(__a , __a , __a)
        return cached
def lowerCAmelCase__ ( a__ ) ->int:
    """Convert a truthy/falsy string to 1 or 0 (``distutils`` ``strtobool``
    semantics); raise ``ValueError`` for anything else.

    Fixes the original body, which lowered the parameter into a discarded
    name and then tested the unbound ``val``.
    """
    val = a__.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'invalid truth value {val!r}' )
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
    """Return True when ``a__`` is a framework tensor — a torch fx proxy,
    ``torch.Tensor``, ``tf.Tensor``, JAX array/tracer — or a numpy array.
    Frameworks are probed lazily, in that order, so unavailable backends
    are never imported.
    """
    if is_torch_fx_proxy(a__ ):
        return True
    if is_torch_available():
        import torch

        if isinstance(a__ , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(a__ , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(a__ , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(a__ , np.ndarray )
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True when ``a__`` is a ``numpy.ndarray``.

    Fixes the original's wrong ``->Dict`` annotation.
    """
    return isinstance(a__ , np.ndarray )
def lowerCAmelCase__ ( a__ ) ->Any:
    """Public wrapper: True when ``a__`` is a numpy array.

    NOTE(review): delegates to ``_is_numpy``, which obfuscation renamed —
    confirm the helper binding.
    """
    return _is_numpy(a__ )
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True when ``a__`` is a ``torch.Tensor``.

    torch is imported lazily so this module loads without it. Fixes the
    original's wrong ``->Tuple`` annotation.
    """
    import torch

    return isinstance(a__ , torch.Tensor )
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
    """Public wrapper: True when torch is available and ``a__`` is a tensor.

    NOTE(review): relies on ``is_torch_available``/``_is_torch`` bindings
    that obfuscation renamed — confirm before use.
    """
    return False if not is_torch_available() else _is_torch(a__ )
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True when ``a__`` is a ``torch.device``.

    torch is imported lazily. Fixes the original's wrong ``->str`` annotation.
    """
    import torch

    return isinstance(a__ , torch.device )
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
    """Public wrapper: True when torch is available and ``a__`` is a device.

    NOTE(review): relies on ``is_torch_available``/``_is_torch_device``
    bindings that obfuscation renamed — confirm before use.
    """
    return False if not is_torch_available() else _is_torch_device(a__ )
def lowerCAmelCase__ ( a__ ) ->bool:
    """Return True when ``a__`` is a ``torch.dtype`` or a string naming one
    (e.g. ``"float32"``).

    Fixes the obfuscated body, whose ``isinstance(a__, a__)`` and
    ``hasattr(a__, a__)`` checks were self-referential no-ops.
    """
    import torch

    if isinstance(a__ , str ):
        # Resolve dtype names like "float32" to the torch attribute, if any.
        if hasattr(torch , a__ ):
            a__ = getattr(torch , a__ )
        else:
            return False
    return isinstance(a__ , torch.dtype )
def lowerCAmelCase__ ( a__ ) ->List[str]:
    """Public wrapper: True when torch is available and ``a__`` is a dtype.

    NOTE(review): relies on ``is_torch_available``/``_is_torch_dtype``
    bindings that obfuscation renamed — confirm before use.
    """
    return False if not is_torch_available() else _is_torch_dtype(a__ )
def lowerCAmelCase__ ( a__ ) ->Any:
    """Return True when ``a__`` is a ``tf.Tensor`` (TF imported lazily)."""
    import tensorflow as tf

    return isinstance(a__ , tf.Tensor )
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
    """Public wrapper: True when TF is available and ``a__`` is a TF tensor.

    NOTE(review): relies on ``is_tf_available``/``_is_tensorflow`` bindings
    that obfuscation renamed — confirm before use.
    """
    return False if not is_tf_available() else _is_tensorflow(a__ )
def lowerCAmelCase__ ( a__ ) ->int:
    """Return True when ``a__`` is a TF symbolic tensor (no concrete value).

    Uses ``tf.is_symbolic_tensor`` when present (TF >= 2.14), otherwise
    falls back to an exact type check.
    """
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(a__ , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(a__ )
    return type(a__ ) == tf.Tensor
def lowerCAmelCase__ ( a__ ) ->Dict:
    """Public wrapper: True when TF is available and ``a__`` is symbolic.

    NOTE(review): relies on ``is_tf_available``/``_is_tf_symbolic_tensor``
    bindings that obfuscation renamed — confirm before use.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(a__ )
def lowerCAmelCase__ ( a__ ) ->Any:
    """Return True when ``a__`` is a JAX ``jnp.ndarray`` (imported lazily)."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(a__ , jnp.ndarray )
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
    """Public wrapper: True when flax/JAX is available and ``a__`` is a JAX array.

    NOTE(review): relies on ``is_flax_available``/``_is_jax`` bindings that
    obfuscation renamed — confirm before use.
    """
    return False if not is_flax_available() else _is_jax(a__ )
def lowerCAmelCase__ ( a__ ) ->Union[str, Any]:
    """Recursively convert tensors/arrays/containers to plain Python objects
    (nested lists, dicts, scalars).

    NOTE(review): the dict branch iterates the unbound name ``obj`` and the
    recursive calls reference ``to_py_obj``/framework predicates that
    obfuscation renamed — restore the original bindings to run.
    """
    if isinstance(a__ , (dict, UserDict) ):
        return {k: to_py_obj(a__ ) for k, v in obj.items()}
    elif isinstance(a__ , (list, tuple) ):
        return [to_py_obj(a__ ) for o in obj]
    elif is_tf_tensor(a__ ):
        return obj.numpy().tolist()
    elif is_torch_tensor(a__ ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(a__ ):
        return np.asarray(a__ ).tolist()
    elif isinstance(a__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def lowerCAmelCase__ ( a__ ) ->Any:
    """Recursively convert tensors/containers to numpy arrays.

    NOTE(review): like ``to_py_obj`` above, several branches read the unbound
    name ``obj`` and call renamed helpers (``to_numpy``, framework
    predicates) — restore the original bindings to run.
    """
    if isinstance(a__ , (dict, UserDict) ):
        return {k: to_numpy(a__ ) for k, v in obj.items()}
    elif isinstance(a__ , (list, tuple) ):
        return np.array(a__ )
    elif is_tf_tensor(a__ ):
        return obj.numpy()
    elif is_torch_tensor(a__ ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(a__ ):
        return np.asarray(a__ )
    else:
        return obj
class _UpperCAmelCase ( UpperCamelCase_ ):
    """Ordered, immutable-ish dict-like container for model outputs
    (a ``ModelOutput``-style base): fields are accessible by attribute,
    string key, or integer index; mutation helpers are disabled.

    NOTE(review): obfuscation bound several results to the throwaway name
    ``_UpperCamelCase`` where the original set items/attributes (notably
    inside the post-init method below), so the population logic no longer
    takes effect — verify against the upstream ``ModelOutput``.
    """
    def __UpperCAmelCase ( self : int) -> Optional[Any]:
        """Validate the dataclass fields and populate the mapping from the
        first field (iterator of key/value pairs, dict, or single tensor);
        originally ``__post_init__``."""
        _UpperCamelCase = fields(self)
        # Safety and consistency checks
        if not len(__a):
            raise ValueError(f'{self.__class__.__name__} has no fields.')
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.')
        _UpperCamelCase = getattr(self , class_fields[0].name)
        _UpperCamelCase = all(getattr(self , field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a , __a):
                _UpperCamelCase = first_field.items()
                _UpperCamelCase = True
            else:
                try:
                    _UpperCamelCase = iter(__a)
                    _UpperCamelCase = True
                except TypeError:
                    _UpperCamelCase = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a , (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0] , __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            _UpperCamelCase = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
                        break
                    setattr(self , element[0] , element[1])
                    if element[1] is not None:
                        _UpperCamelCase = element[1]
            elif first_field is not None:
                _UpperCamelCase = first_field
        else:
            for field in class_fields:
                _UpperCamelCase = getattr(self , field.name)
                if v is not None:
                    _UpperCamelCase = v
    def __delitem__( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : Union[str, Any]) -> Optional[Any]:
        """Deletion is forbidden on output containers."""
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
    def __UpperCAmelCase ( self : Dict , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any]) -> Optional[Any]:
        """``setdefault`` is forbidden on output containers."""
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
    def __UpperCAmelCase ( self : Tuple , *lowercase_ : Optional[int] , **lowercase_ : List[Any]) -> List[str]:
        """``pop`` is forbidden on output containers."""
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
    def __UpperCAmelCase ( self : Any , *lowercase_ : List[str] , **lowercase_ : List[Any]) -> Union[str, Any]:
        """``update`` is forbidden on output containers."""
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
    def __getitem__( self : Optional[int] , lowercase_ : Union[str, Any]) -> Optional[Any]:
        """String keys index the mapping; integers/slices index ``to_tuple()``."""
        if isinstance(__a , __a):
            _UpperCamelCase = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> Optional[Any]:
        """Keep attribute writes and mapping entries in sync (known keys only)."""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a , __a)
        super().__setattr__(__a , __a)
    def __setitem__( self : Dict , lowercase_ : str , lowercase_ : int) -> Dict:
        """Keep mapping writes and attributes in sync."""
        super().__setitem__(__a , __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a , __a)
    def __UpperCAmelCase ( self : Optional[Any]) -> Tuple[Any]:
        """Return all values as a tuple, in key order (originally ``to_tuple``)."""
        return tuple(self[k] for k in self.keys())
class _UpperCAmelCase ( UpperCamelCase_, UpperCamelCase_ ):
    """Enum mix-in that raises a descriptive error for unknown values.

    `_missing_` is the classmethod hook `enum.Enum` invokes when a lookup
    value matches no member; raising here replaces a silent `None` result
    with a ValueError listing the valid choices.
    """

    @classmethod
    def _missing_(cls, value):
        # BUG FIX: the body read an unbound name (the parameter was renamed
        # away by obfuscation) and the misspelled attribute
        # `_valueamember_map_` — Enum's real mapping is `_value2member_map_`.
        # The hook must also be named `_missing_` for Enum to call it.
        raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class _UpperCAmelCase ( UpperCamelCase_ ):
    """Possible values for a `padding` argument: pad to the longest sequence
    in the batch, pad to a fixed maximum length, or do not pad at all.

    NOTE(review): all three members are bound to the same obfuscated name
    `__A`, so only the last assignment survives — upstream these are
    distinct members (LONGEST / MAX_LENGTH / DO_NOT_PAD); confirm.
    """

    __A = '''longest'''
    __A = '''max_length'''
    __A = '''do_not_pad'''
class _UpperCAmelCase ( UpperCamelCase_ ):
    """Tensor-framework identifiers: PyTorch, TensorFlow, NumPy, or JAX.

    NOTE(review): all four members are bound to the same obfuscated name
    `__A`, so only the last assignment survives — upstream these are
    distinct members (PYTORCH / TENSORFLOW / NUMPY / JAX); confirm.
    """

    __A = '''pt'''
    __A = '''tf'''
    __A = '''np'''
    __A = '''jax'''
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : str , lowercase_ : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = context_managers
_UpperCamelCase = ExitStack()
def __enter__( self : str) -> Any:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(__a)
def __exit__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[Any]) -> int:
"""simple docstring"""
self.stack.__exit__(*__a , **__a)
def lowerCAmelCase__ ( a__ ) ->Tuple:
    """Return True when model class `a__` can compute and return a loss.

    Inspects the class's call/forward signature (chosen per framework) for a
    `return_loss` parameter whose default is True.

    NOTE(review): `infer_framework` is defined later in this module under an
    obfuscated name — confirm the wiring.
    """
    # BUG FIX: the framework string and signature were bound to a throwaway
    # local while the reads used `framework` / `signature` / `model_class`.
    framework = infer_framework(a__ )
    if framework == "tf":
        signature = inspect.signature(a__.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(a__.forward )  # PyTorch models
    else:
        signature = inspect.signature(a__.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def lowerCAmelCase__ ( a__ ) ->List[Any]:
    """Return the label-argument names accepted by model class `a__`.

    Looks at the call/forward signature (chosen per framework) and keeps
    parameters containing "label"; QuestionAnswering models additionally
    accept start_positions/end_positions.
    """
    # BUG FIX: locals were bound to a throwaway name while the reads used
    # `model_name` / `framework` / `signature` / `model_class`.
    model_name = a__.__name__
    framework = infer_framework(a__ )
    if framework == "tf":
        signature = inspect.signature(a__.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(a__.forward )  # PyTorch models
    else:
        signature = inspect.signature(a__.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def lowerCAmelCase__ ( a__ , a__ = "" , a__ = "." ) ->Any:
'''simple docstring'''
def _flatten_dict(a__ , a__="" , a__="." ):
for k, v in d.items():
_UpperCamelCase = str(a__ ) + delimiter + str(a__ ) if parent_key else k
if v and isinstance(a__ , a__ ):
yield from flatten_dict(a__ , a__ , delimiter=a__ ).items()
else:
yield key, v
return dict(_flatten_dict(a__ , a__ , a__ ) )
@contextmanager
def lowerCAmelCase__ ( a__ , a__ = False ) ->Union[str, Any]:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCAmelCase__ ( a__ , axes=None ) ->List[Any]:
    """Framework-agnostic transpose of `a__` (numpy / torch / tf / jax).

    `axes` optionally gives the permutation; None means full reverse. The
    original signature repeated `a__` (a SyntaxError) and the branches read
    unbound names `array`/`axes`; fixed here.
    """
    if is_numpy_array(a__ ):
        return np.transpose(a__ , axes=axes )
    elif is_torch_tensor(a__ ):
        # torch spells the permutation as positional args to .permute
        return a__.T if axes is None else a__.permute(*axes )
    elif is_tf_tensor(a__ ):
        import tensorflow as tf

        return tf.transpose(a__ , perm=axes )
    elif is_jax_tensor(a__ ):
        return jnp.transpose(a__ , axes=axes )
    else:
        raise ValueError(f'Type not supported for transpose: {type(a__ )}.' )
def lowerCAmelCase__ ( a__ , newshape ) ->List[str]:
    """Framework-agnostic reshape of `a__` to `newshape`.

    The original signature repeated `a__` (a SyntaxError) and the torch
    branch read an unbound name; fixed here.
    """
    if is_numpy_array(a__ ):
        return np.reshape(a__ , newshape )
    elif is_torch_tensor(a__ ):
        # torch takes the shape as positional args
        return a__.reshape(*newshape )
    elif is_tf_tensor(a__ ):
        import tensorflow as tf

        return tf.reshape(a__ , newshape )
    elif is_jax_tensor(a__ ):
        return jnp.reshape(a__ , newshape )
    else:
        raise ValueError(f'Type not supported for reshape: {type(a__ )}.' )
def lowerCAmelCase__ ( a__ , axis=None ) ->Union[str, Any]:
    """Framework-agnostic squeeze of `a__` along `axis` (all axes when None).

    The original signature repeated `a__` (a SyntaxError) and the torch
    branch read unbound names; fixed here.
    """
    if is_numpy_array(a__ ):
        return np.squeeze(a__ , axis=axis )
    elif is_torch_tensor(a__ ):
        # torch distinguishes "all axes" from "one dim"
        return a__.squeeze() if axis is None else a__.squeeze(dim=axis )
    elif is_tf_tensor(a__ ):
        import tensorflow as tf

        return tf.squeeze(a__ , axis=axis )
    elif is_jax_tensor(a__ ):
        return jnp.squeeze(a__ , axis=axis )
    else:
        raise ValueError(f'Type not supported for squeeze: {type(a__ )}.' )
def lowerCAmelCase__ ( a__ , axis ) ->int:
    """Framework-agnostic expand_dims: insert a size-1 axis at `axis`.

    The original signature repeated `a__` (a SyntaxError) and the torch
    branch read unbound names; fixed here.
    """
    if is_numpy_array(a__ ):
        return np.expand_dims(a__ , axis )
    elif is_torch_tensor(a__ ):
        return a__.unsqueeze(dim=axis )
    elif is_tf_tensor(a__ ):
        import tensorflow as tf

        return tf.expand_dims(a__ , axis=axis )
    elif is_jax_tensor(a__ ):
        return jnp.expand_dims(a__ , axis=axis )
    else:
        raise ValueError(f'Type not supported for expand_dims: {type(a__ )}.' )
def lowerCAmelCase__ ( a__ ) ->Optional[int]:
    """Return the total number of elements of tensor/array `a__`.

    Dispatches on the framework of the input (numpy / torch / tf / jax).
    """
    if is_numpy_array(a__ ):
        return np.size(a__ )
    elif is_torch_tensor(a__ ):
        # BUG FIX: the torch/jax branches read an unbound name `array`;
        # the argument is `a__`.
        return a__.numel()
    elif is_tf_tensor(a__ ):
        import tensorflow as tf

        return tf.size(a__ )
    elif is_jax_tensor(a__ ):
        return a__.size
    else:
        # BUG FIX: the message said "expand_dims" (copy/paste error).
        raise ValueError(f'Type not supported for tensor_size: {type(a__ )}.' )
def lowerCAmelCase__ ( a__ , a__ ) ->Optional[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(a__ , (tuple, list) ):
_UpperCamelCase = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
_UpperCamelCase = f'{repo_id}--{value}'
return auto_map
def lowerCAmelCase__ ( a__ ) ->Tuple:
'''simple docstring'''
for base_class in inspect.getmro(a__ ):
_UpperCamelCase = base_class.__module__
_UpperCamelCase = base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
| 717 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
_UpperCamelCase = uuid.uuida()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
    lowerCAmelCase, R'''
    min_length_for_response (`int`, *optional*, defaults to 32):
        The minimum length (in number of tokens) for a response.
    minimum_tokens (`int`, *optional*, defaults to 10):
        The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Conversational (multi-turn dialogue) pipeline.

    Accepts Conversation objects, tokenizes the dialogue history, generates a
    reply with the underlying model and appends it back onto the conversation.

    NOTE(review): obfuscation damage throughout — several signatures reuse
    `lowercase_` for both *args and **kwargs (a SyntaxError), and many
    assignments target the throwaway name `_UpperCamelCase` while the later
    reads use the upstream local names. Comments describe apparent intent;
    confirm against the upstream pipeline before relying on them.
    """

    def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
        """Initialize the base pipeline; fall back to EOS as pad token.

        NOTE(review): the fallback is bound to a throwaway local, so
        `self.tokenizer.pad_token` is never actually set here.
        """
        super().__init__(*lowercase_ , **lowercase_)
        if self.tokenizer.pad_token_id is None:
            _UpperCamelCase = self.tokenizer.eos_token

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
        """Split kwargs into (preprocess, forward, postprocess) dicts.

        NOTE(review): all three dicts are bound to the same throwaway name,
        and the reads (`min_length_for_response`, `minimum_tokens`,
        `generate_kwargs`, `clean_up_tokenization_spaces`, the three returned
        names) are unbound here.
        """
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        _UpperCamelCase = {}
        if min_length_for_response is not None:
            _UpperCamelCase = min_length_for_response
        if minimum_tokens is not None:
            _UpperCamelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            _UpperCamelCase = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(lowercase_)
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
        """Run the pipeline; unwrap a single-element result list."""
        _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
        if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
            return outputs[0]
        return outputs

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
        """Preprocess: build framework `input_ids` from a Conversation."""
        if not isinstance(lowercase_ , lowercase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer , "_build_conversation_input_ids"):
            _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
        if self.framework == "pt":
            _UpperCamelCase = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            _UpperCamelCase = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
        """Forward: trim over-long history, generate, strip the prompt tokens."""
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        _UpperCamelCase = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            # Keep only the most recent tokens so `minimum_tokens` remain for
            # the reply (apparent intent).
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            _UpperCamelCase = max_length - minimum_tokens
            _UpperCamelCase = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                _UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
        _UpperCamelCase = model_inputs.pop("conversation")
        _UpperCamelCase = max_length
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start after the decoder start token.
            _UpperCamelCase = 1
        else:
            _UpperCamelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
        """Postprocess: decode, mark the input processed, append the reply."""
        _UpperCamelCase = model_outputs["output_ids"]
        _UpperCamelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
        _UpperCamelCase = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(lowercase_)
        return conversation

    def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
        """Legacy tokenization: join turns with EOS, truncate to model max."""
        _UpperCamelCase = self.tokenizer.eos_token_id
        _UpperCamelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
        if len(lowercase_) > self.tokenizer.model_max_length:
            # Keep only the most recent tokens.
            _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 0 |
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
_UpperCamelCase = int(_lowercase )
# Initialize Result
_UpperCamelCase = []
# Traverse through all denomination
for denomination in reversed(_lowercase ):
# Find denominations
while int(_lowercase ) >= int(_lowercase ):
total_value -= int(_lowercase )
answer.append(_lowercase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
    # Interactive driver: either read custom denominations from stdin or use
    # the default Indian-currency set, then print the greedy minimal change.
    # NOTE(review): obfuscation damage — every assignment targets
    # `lowerCamelCase__`, while the reads below use `n`, `denominations`,
    # `value` and `answer`, and `find_minimum_change` is not defined under
    # that name in this file; confirm the wiring against upstream.
    lowerCamelCase__ = []
    lowerCamelCase__ = '''0'''
    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        lowerCamelCase__ = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F"Denomination {i}: ").strip()))
        lowerCamelCase__ = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        lowerCamelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        lowerCamelCase__ = input('''Enter the change you want to make: ''').strip()
    if int(value) == 0 or int(value) < 0:
        print('''The total value cannot be zero or negative.''')
    else:
        print(F"Following is minimal change for {value}: ")
        lowerCamelCase__ = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=''' ''')
def lowerCAmelCase__ ( a__ = 50 ) ->int:
    """Count the ways to tile a row of length `a__` with unit squares plus
    blocks of length 2, 3 and 4 (Project-Euler-117-style DP).

    ways_number[L] starts at 1 (all unit squares); for every block length and
    every start offset we add the ways to fill the remaining prefix. Smaller
    indices are final before larger ones, so a single pass suffices.
    """
    # BUG FIX: the table was bound to a throwaway local while the loop read
    # `ways_number`, and the body read an unbound `length` (the parameter).
    ways_number = [1] * (a__ + 1)
    for row_length in range(a__ + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[a__]
if __name__ == "__main__":
    # Print the tiling count for the default row length (50).
    # NOTE(review): `solution` is not defined under that name in this file
    # (the function above is obfuscated to `lowerCAmelCase__`); confirm.
    print(F"{solution() = }")
| 82 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase__ = '''\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n'''
lowerCamelCase__ = '''\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'''
lowerCamelCase__ = '''\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """ROUGE metric wrapping google-research's `rouge_score` package.

    Method names are restored to the `datasets.Metric` protocol (`_info` /
    `_compute`) — the obfuscated version gave both methods the same name, so
    one shadowed the other and neither hook was callable by the base class.
    """

    def _info(self):
        """Declare metric metadata and the expected string features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence"),
                    "references": datasets.Value("string" , id="sequence"),
                }) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ] , )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        """Score `predictions` against `references`.

        BUG FIX: the original signature reused one parameter name five times
        (a SyntaxError) and bound every local to a throwaway name; parameters
        and locals are restored from the body's reads.
        """
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions):
            score = scorer.score(ref , pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            # Without aggregation, return one list of Scores per rouge type.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
| 719 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( class_prompt , class_data_dir , num_class_images ) ->int:
    """Download ~`num_class_images` regularization images matching
    `class_prompt` from the LAION-400M kNN service into `class_data_dir`.

    Writes the images plus parallel caption/url/path index files. Skips work
    when enough images already exist. The original signature repeated `a__`
    three times (a SyntaxError) and bound every local to a throwaway name;
    restored here (parameter order matches the __main__ caller below).
    """
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    # BUG FIX: `exist_ok` was an unbound name; it should simply be True.
    os.makedirs(f'{class_data_dir}/images' , exist_ok=True )
    if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
        return
    # Grow the query size until the service returns enough candidates
    # (bounded at 10k results).
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    # BUG FIX: the three index-file handles shadowed one name; use distinct
    # handles for captions, urls and image paths.
    with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa_urls, open(
        f'{class_data_dir}/images.txt' , "w" ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    # Validate the payload decodes as an image before saving.
                    Image.open(BytesIO(img.content ) )
                    with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fa_urls.write(images["url"] + "\n" )
                    fa_paths.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                # Best effort: skip broken downloads/decodes and keep going.
                continue
    return
def lowerCAmelCase__ ( ) ->Optional[Any]:
    """Parse CLI options for the regularization-image retrieval script.

    BUG FIX: the original body bound the parser to a throwaway local and
    passed the unbound name `a__` for `add_help` / `required` / `type`
    (this is a zero-argument function); concrete values restored.
    """
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: parse CLI args, then download the images.
    # NOTE(review): the result is bound to `lowerCamelCase__` but read as
    # `args`, and `parse_args`/`retrieve` are not defined under those names
    # in this file (obfuscation damage); confirm the wiring.
    lowerCamelCase__ = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Dict:
    """Build a UniSpeechSatForSequenceClassification model and load the s3prl
    downstream head weights into it (apparent intent).

    NOTE(review): obfuscation damage — the signature repeats `a__` (a
    SyntaxError), `lowercase_` is unbound, and every weight tensor is bound
    to a throwaway local instead of being copied into the model (upstream:
    `model.projector.weight.data = ...`, `model.classifier.*.data = ...`).
    Restore from the original conversion script before use.
    """
    _UpperCamelCase = UniSpeechSatForSequenceClassification.from_pretrained(lowercase_ , config=lowercase_ )
    _UpperCamelCase = downstream_dict["""projector.weight"""]
    _UpperCamelCase = downstream_dict["""projector.bias"""]
    _UpperCamelCase = downstream_dict["""model.post_net.linear.weight"""]
    _UpperCamelCase = downstream_dict["""model.post_net.linear.bias"""]
    return model
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Dict:
    """Build a UniSpeechSatForAudioFrameClassification model and load the
    s3prl diarization head weights (apparent intent).

    NOTE(review): same damage pattern as above — duplicate `a__` parameters,
    unbound `lowercase_`/`model`, and weights dropped into a throwaway local
    instead of being copied into the model; restore from upstream.
    """
    _UpperCamelCase = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase_ , config=lowercase_ )
    _UpperCamelCase = downstream_dict["""model.linear.weight"""]
    _UpperCamelCase = downstream_dict["""model.linear.bias"""]
    return model
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Optional[Any]:
    """Build a UniSpeechSatForXVector model and load the s3prl x-vector
    (speaker verification) head weights, including the per-layer TDNN
    kernels (apparent intent).

    NOTE(review): same damage pattern — duplicate `a__` parameters, unbound
    `lowercase_`/`hf_config`/`model`, and every weight tensor is dropped
    into a throwaway local instead of being copied into the model; restore
    from the original conversion script.
    """
    _UpperCamelCase = UniSpeechSatForXVector.from_pretrained(lowercase_ , config=lowercase_ )
    _UpperCamelCase = downstream_dict["""connector.weight"""]
    _UpperCamelCase = downstream_dict["""connector.bias"""]
    # One kernel/bias pair per TDNN layer declared by the config.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        _UpperCamelCase = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        _UpperCamelCase = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    _UpperCamelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    _UpperCamelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    _UpperCamelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    _UpperCamelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    _UpperCamelCase = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->int:
    """Convert an s3prl UniSpeechSAT checkpoint into a Hugging Face model and
    save model + feature extractor to disk (apparent intent).

    Dispatches on the architecture declared in the config
    (...ForSequenceClassification / ...ForAudioFrameClassification /
    ...ForXVector).

    NOTE(review): obfuscation damage — the signature repeats `a__` four
    times (a SyntaxError); `lowercase_` is unbound (upstream parameters are
    base_model_name, config_path, checkpoint_path, model_dump_path); and
    every local is bound to a throwaway name while the reads use
    `checkpoint`, `downstream_dict`, `hf_config`, `hf_feature_extractor`,
    `arch`, `hf_model`. Restore from the original script before use.
    """
    _UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )
    _UpperCamelCase = checkpoint["""Downstream"""]
    _UpperCamelCase = UniSpeechSatConfig.from_pretrained(lowercase_ )
    _UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
        lowercase_ , return_attention_mask=lowercase_ , do_normalize=lowercase_ )
    _UpperCamelCase = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        _UpperCamelCase = convert_classification(lowercase_ , lowercase_ , lowercase_ )
    elif arch.endswith("ForAudioFrameClassification" ):
        _UpperCamelCase = convert_diarization(lowercase_ , lowercase_ , lowercase_ )
    elif arch.endswith("ForXVector" ):
        _UpperCamelCase = convert_xvector(lowercase_ , lowercase_ , lowercase_ )
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
    if hf_config.use_weighted_layer_sum:
        # Carry over the learned layer-weighting from the featurizer.
        _UpperCamelCase = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(lowercase_ )
    hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    # CLI entry point for converting an s3prl UniSpeechSAT checkpoint.
    # NOTE(review): the parser and parsed args are bound to `lowerCamelCase__`
    # but read as `parser`/`args`, and `convert_saprl_checkpoint` is not
    # defined under that name in this file (obfuscation damage); confirm.
    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    lowerCamelCase__ = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 720 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 0.0
for i, j in zip(lowercase_ , lowercase_):
n_correct += 1.0 if math_equivalence.is_equiv(lowercase_ , lowercase_) else 0.0
_UpperCamelCase = n_correct / len(lowercase_)
return {
"accuracy": accuracy,
}
| 82 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCamelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCamelCase__ = {'''facebook/blenderbot_small-90M''': 512}
def lowerCAmelCase__ ( a__ ) ->int:
    """Return the set of adjacent symbol pairs in word `a__`.

    `a__` is a word given as a sequence of symbols (characters or merged
    symbol strings); e.g. ("a","b","c") -> {("a","b"), ("b","c")}.
    Raises IndexError on an empty word (as the original did).
    """
    # BUG FIX: locals were bound to a throwaway name while the loop read
    # `pairs`/`prev_char`, and `word` was unbound (the parameter is `a__`).
    pairs = set()
    prev_char = a__[0]
    for char in a__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return set(pairs )
class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """BlenderbotSmall-style BPE tokenizer (class continues below).

    NOTE(review): the four class attributes are all bound to the obfuscated
    name `__A`, so only the last assignment survives — upstream these are
    vocab_files_names / pretrained_vocab_files_map /
    max_model_input_sizes / model_input_names; confirm.
    """

    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __A = ['input_ids', 'attention_mask']
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : int="__start__" , lowercase_ : Optional[Any]="__end__" , lowercase_ : Dict="__unk__" , lowercase_ : Tuple="__null__" , **lowercase_ : List[str] , ) -> int:
"""simple docstring"""
super().__init__(unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , **_a)
with open(_a , encoding="utf-8") as vocab_handle:
_UpperCamelCase = json.load(_a)
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(_a , encoding="utf-8") as merges_handle:
_UpperCamelCase = merges_handle.read().split("\n")[1:-1]
_UpperCamelCase = [tuple(merge.split()) for merge in merges]
_UpperCamelCase = dict(zip(_a , range(len(_a))))
_UpperCamelCase = {}
@property
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
return len(self.encoder)
def __UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[int]) -> Optional[int]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_UpperCamelCase = re.sub("([.,!?()])" , R" \1" , _a)
_UpperCamelCase = re.sub("(\')" , R" \1 " , _a)
_UpperCamelCase = re.sub(R"\s{2,}" , " " , _a)
if "\n" in token:
_UpperCamelCase = token.replace("\n" , " __newln__")
_UpperCamelCase = token.split(" ")
_UpperCamelCase = []
for token in tokens:
if not len(_a):
continue
_UpperCamelCase = token.lower()
_UpperCamelCase = tuple(_a)
_UpperCamelCase = tuple(list(word[:-1]) + [word[-1] + "</w>"])
_UpperCamelCase = get_pairs(_a)
if not pairs:
words.append(_a)
continue
while True:
_UpperCamelCase = min(_a , key=lambda lowercase_: self.bpe_ranks.get(_a , float("inf")))
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(_a):
try:
_UpperCamelCase = word.index(_a , _a)
new_word.extend(word[i:j])
_UpperCamelCase = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(_a) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCamelCase = tuple(_a)
_UpperCamelCase = new_word
if len(_a) == 1:
break
else:
_UpperCamelCase = get_pairs(_a)
_UpperCamelCase = "@@ ".join(_a)
_UpperCamelCase = word[:-4]
_UpperCamelCase = word
words.append(_a)
return " ".join(_a)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = re.findall(R"\S+\n?" , _a)
for token in words:
split_tokens.extend(list(self.bpe(_a).split(" ")))
return split_tokens
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = token.lower()
return self.encoder.get(_a , self.encoder.get(self.unk_token))
def __UpperCAmelCase ( self : Any , lowercase_ : List[Any]) -> List[str]:
"""simple docstring"""
return self.decoder.get(_a , self.unk_token)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = " ".join(_a).replace("@@ " , "").strip()
return out_string
def __UpperCAmelCase ( self : Any , lowercase_ : Dict , lowercase_ : List[str] = None) -> List[str]:
"""simple docstring"""
if not os.path.isdir(_a):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
_UpperCamelCase = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_UpperCamelCase = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(_a , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a) + "\n")
_UpperCamelCase = 0
with open(_a , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!")
_UpperCamelCase = token_index
writer.write(" ".join(_a) + "\n")
index += 1
return vocab_file, merge_file
| 721 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Number of examples for the full-speed benchmark and the small benchmark.
# NOTE(review): every module constant here binds the same name
# ``lowerCamelCase__`` (each assignment shadows the previous) while the code
# below reads SPEED_TEST_N_EXAMPLES / SMALL_TEST / RESULTS_BASEPATH /
# RESULTS_FILENAME / RESULTS_FILE_PATH — likely obfuscation damage.
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
# Results JSON is written next to this script: results/<script name>.json
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( dataset , length ) -> None:
    """Read *length* single examples from *dataset*, timed by ``get_duration``.

    The values themselves are discarded.  Fixes the original signature, which
    declared the parameter name ``a__`` twice (a SyntaxError) and then read
    an unbound name ``dataset``.
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size ) -> None:
    """Read *length* examples from *dataset* in slices of *batch_size*.

    Timed by ``get_duration``; slices are discarded.  Fixes the original
    duplicate ``a__`` parameter names (a SyntaxError) and the unbound
    ``dataset``/``batch_size`` reads.
    """
    for i in range(0 , length , batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( dataset , length , type ) -> None:
    """Read *length* single examples with the dataset formatted as *type*.

    ``type`` deliberately shadows the builtin: the benchmark driver passes it
    through a ``{"type": ...}`` kwargs dict, so the name must match.  Fixes
    the original duplicate ``a__`` parameters (a SyntaxError).
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size , type ) -> None:
    """Read *length* examples in *batch_size* slices with format *type*.

    ``type`` deliberately shadows the builtin for kwargs compatibility with
    the benchmark driver.  Fixes the original quadruple ``a__`` parameter
    names (a SyntaxError).
    """
    with dataset.formatted_as(type=type):
        for i in range(0 , length , batch_size):
            _ = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
    """Generate a synthetic Arrow dataset and time several read patterns.

    Per-pattern durations are collected into one dict and finally serialised
    to JSON.

    NOTE(review): assignment targets were collapsed to ``_UpperCamelCase``,
    so the names read later (``functions``, ``functions_shuffled``,
    ``dataset``, ``read``, ``read_batch``, ``read_formatted``,
    ``read_formatted_batch``, the results path passed to ``open``) are all
    unbound — the function cannot run as written.
    """
    _UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (function, kwargs) pairs benchmarked on the freshly generated dataset.
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # Subset of the patterns re-run after shuffling the dataset.
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        _UpperCamelCase = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        _UpperCamelCase = generate_example_dataset(
            os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(a__ ) )
            _UpperCamelCase = func(a__ , **a__ )
        print("shuffling dataset" )
        _UpperCamelCase = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(a__ ) )
            _UpperCamelCase = func(
                a__ , **a__ )
    with open(a__ , "wb" ) as f:
        f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
    # NOTE(review): ``benchmark_iterating`` is not defined in this file (the
    # benchmark function above is named ``lowerCAmelCase__``); this call will
    # raise NameError — confirm the intended entry point.
    benchmark_iterating()
| 82 | 0 |
def lowerCAmelCase__ ( a__ ) ->int:
    """Return A(a__): the length of the smallest repunit divisible by *a__*.

    A repunit R(k) is the integer made of k ones (R(3) = 111).  When *a__*
    shares a factor with 10 no repunit is ever divisible by it, so 0 is
    returned.  Only the repunit's residue modulo *a__* is tracked, keeping
    the loop O(k) time and O(1) space.

    Fixes the original body, whose assignment targets were collapsed to
    ``_UpperCamelCase`` so that ``repunit``/``repunit_index`` were unbound.
    """
    if a__ % 5 == 0 or a__ % 2 == 0:
        # gcd(a__, 10) > 1: no repunit can be divisible by a__.
        return 0
    repunit = 1  # R(1) modulo a__
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1; keep only the residue.
        repunit = (10 * repunit + 1) % a__
        repunit_index += 1
    return repunit_index
def _least_divisible_repunit(divisor):
    """A(divisor): length of the smallest repunit divisible by *divisor*, 0 if none."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def lowerCAmelCase__ ( a__ = 1_000_000 ) ->int:
    """Return the least n for which A(n) first exceeds *a__* (Project Euler 129).

    Since A(n) <= n, the search can start just below the limit; even
    candidates are skipped because A(even) == 0 can never exceed *a__*.

    Fixes the original, which called an undefined ``least_divisible_repunit``
    with an undefined argument ``_lowerCamelCase``; a private helper defined
    above makes the block self-contained.
    """
    divisor = a__ - 1
    if divisor % 2 == 0:
        divisor += 1
    while _least_divisible_repunit(divisor) <= a__:
        divisor += 2
    return divisor
if __name__ == "__main__":
    # NOTE: the original printed an undefined name ``solution``; the solver
    # in this file is ``lowerCAmelCase__``.
    print(F"{lowerCAmelCase__() = }")
| 700 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make torch/cuDNN operations deterministic so generated images are
# reproducible across test runs.
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast sanity checks for KarrasVePipeline using a tiny random UNet.

    NOTE(review): assignment targets were collapsed to ``_UpperCamelCase``,
    so later reads of ``model``, ``pipe``, ``image``, ``image_from_tuple``,
    ``image_slice``, ``expected_slice`` and the ``lowercase_`` arguments are
    unbound — the tests cannot run as written.
    """
    @property
    def __UpperCAmelCase ( self : int) -> str:
        """Build a small deterministic UNet2DModel for the fast test."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Run 2 inference steps and compare dict vs tuple pipeline outputs."""
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
        # Same seed again so the tuple-return path produces identical images.
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test for KarrasVePipeline with a real 256px checkpoint.

    NOTE(review): the same collapsed-name problem as the fast test class
    applies here (``model``, ``pipe``, ``image`` etc. are unbound).
    """
    def __UpperCAmelCase ( self : int) -> Tuple:
        """Run 20 inference steps on google/ncsnpp-celebahq-256 and check a slice."""
        _UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        # Reference values recorded from a known-good run of this checkpoint.
        _UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCAmelCase__ ( a__ ) ->"UperNetConfig":
    """Build an UperNet-over-Swin config for the checkpoint named *a__*.

    Backbone hyper-parameters are selected from the size keyword embedded in
    the model name ("tiny"/"small"/"base"/"large"); ADE20K label maps are
    fetched from the hub.

    Fixes the original, which built the id->label map with ``int(a__)``
    (the model name) instead of ``int(k)``, passed the model name as the
    ``idalabel``/``labelaid`` kwargs, and leaked the file handle opened for
    ``json.load``.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in a__:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in a__:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in a__:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in a__:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information (ADE20K: 150 semantic classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    with open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    # NOTE(review): upstream transformers spells these kwargs ``id2label`` /
    # ``label2id``; the obfuscated names are kept here for consistency with
    # the rest of this file — confirm before shipping.
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid , )
    return config
def lowerCAmelCase__ ( a__ ) ->list:
    """Return (source, target) pairs renaming mmseg Swin-UperNet weight keys
    to their Hugging Face equivalents.

    *a__* is a config whose ``backbone_config.depths`` gives the number of
    transformer blocks per stage.  Fixes the original, which appended to an
    unbound name ``rename_keys`` and read an unbound ``config`` instead of
    its parameter.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
    # stages
    for i in range(len(a__.backbone_config.depths ) ):
        for j in range(a__.backbone_config.depths[i] ):
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
        # Only the first three stages have a patch-merging downsample layer.
        if i < 3:
            rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
        rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
        rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys
def lowerCAmelCase__ ( dct , old , new ) ->None:
    """Move the entry ``dct[old]`` to ``dct[new]`` in place.

    Fixes the original, which declared the duplicate parameter name ``a__``
    three times (a SyntaxError) and stored the popped value only into a
    collapsed throwaway name instead of the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def lowerCAmelCase__ ( state_dict , backbone_config ) ->None:
    """Split each fused mmseg ``qkv`` projection into separate HF
    query/key/value tensors, mutating *state_dict* in place.

    Fixes the original, which declared the duplicate parameter name ``a__``
    twice (a SyntaxError) and assigned every q/k/v slice to a collapsed
    throwaway name instead of the corresponding state-dict key.
    """
    # Per-stage hidden sizes: embed_dim doubled at every stage.
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
            in_proj_bias = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def lowerCAmelCase__ ( a__ ) ->torch.Tensor:
    """Reorder a 2-D tensor's columns in groups of four: [0,1,2,3] -> [0,2,1,3].

    Used by the conversion script on ``downsample.reduction`` weights.  Fixes
    the original body, which read an unbound name ``x`` instead of the
    parameter ``a__``.
    """
    out_channel , in_channel = a__.shape
    # Expose the four interleave groups as an explicit axis, permute them,
    # then flatten back to the original 2-D shape.
    x = a__.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def lowerCAmelCase__ ( a__ ) ->torch.Tensor:
    """Inverse of the reduction-order correction: restores the original
    column interleave of a 2-D tensor.

    Fixes the original body, which read an unbound name ``x`` instead of the
    parameter ``a__``.
    """
    out_channel , in_channel = a__.shape
    # Group columns four at a time, undo the [0,2,1,3] permutation, flatten.
    x = a__.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def lowerCAmelCase__ ( a__ ) ->torch.Tensor:
    """Reorder a 1-D tensor's elements in four interleave groups.

    Used by the conversion script on ``downsample.norm`` parameters.  Fixes
    the original body, which read an unbound name ``x`` instead of the
    parameter ``a__``.
    """
    in_channel = a__.shape[0]
    # Split into four groups, permute them as [0,2,1,3], interleave back.
    x = a__.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def lowerCAmelCase__ ( a__ ) ->torch.Tensor:
    """Inverse of the norm-order correction for a 1-D tensor.

    Fixes the original body, which read an unbound name ``x`` instead of the
    parameter ``a__``.
    """
    in_channel = a__.shape[0]
    # Group elements four at a time, undo the [0,2,1,3] permutation, flatten.
    x = a__.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
    """Convert an mmsegmentation Swin-UperNet checkpoint to the HF format.

    Downloads the checkpoint, renames/reshapes the weights, verifies the
    logits on a fixture image, and optionally saves to disk / pushes to the
    hub.

    NOTE(review): the signature declares ``a__`` three times (duplicate
    parameter names are a SyntaxError); ``model_name``,
    ``pytorch_dump_folder_path`` and ``push_to_hub`` were intended.  Local
    assignment targets were also collapsed to ``_UpperCamelCase``, leaving
    later reads of ``model_name_to_url``, ``state_dict``, ``config``,
    ``model``, ``val``, ``rename_keys``, ``image``, ``processor``,
    ``outputs``, ``logits`` and the expected-slice tensor unbound.
    """
    # mmseg release URLs for every supported checkpoint size.
    _UpperCamelCase = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    _UpperCamelCase = model_name_to_url[model_name]
    _UpperCamelCase = torch.hub.load_state_dict_from_url(a__ , map_location="cpu" , file_name=a__ )[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(a__ , param.shape )
    _UpperCamelCase = get_upernet_config(a__ )
    _UpperCamelCase = UperNetForSemanticSegmentation(a__ )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        _UpperCamelCase = state_dict.pop(a__ )
        if "bn" in key:
            _UpperCamelCase = key.replace("bn" , "batch_norm" )
        _UpperCamelCase = val
    # rename keys
    _UpperCamelCase = create_rename_keys(a__ )
    for src, dest in rename_keys:
        rename_key(a__ , a__ , a__ )
    read_in_q_k_v(a__ , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                _UpperCamelCase = reverse_correct_unfold_reduction_order(a__ )
            if "norm" in key:
                _UpperCamelCase = reverse_correct_unfold_norm_order(a__ )
    model.load_state_dict(a__ )
    # verify on image
    _UpperCamelCase = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    _UpperCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("RGB" )
    _UpperCamelCase = SegformerImageProcessor()
    _UpperCamelCase = processor(a__ , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        _UpperCamelCase = model(a__ )
        _UpperCamelCase = outputs.logits
    print(logits.shape )
    print("First values of logits:" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        _UpperCamelCase = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        _UpperCamelCase = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        _UpperCamelCase = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        _UpperCamelCase = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , a__ , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(a__ )
        print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(a__ )
    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub' )
        model.push_to_hub(f'openmmlab/{model_name}' )
        processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``lowerCamelCase__`` but then read
    # as ``parser``/``args`` — obfuscation damage; this entry point raises
    # NameError as written.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''upernet-swin-tiny''',
        type=str,
        choices=[F"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
        help='''Name of the Swin + UperNet model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    lowerCamelCase__ = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCamelCase__ = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase(BenchmarkArguments):
    """TensorFlow-specific benchmark arguments.

    Extends `BenchmarkArguments` with TPU/GPU device selection, eager mode and
    XLA options, and translates the deprecated `no_*` CLI flags into their
    positive counterparts.
    """

    # Deprecated negative flags still accepted for backward compatibility.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated `no_<arg>` kwargs to `<arg>=not value`, then init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve a TPU cluster if one is requested and reachable, else None."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                # No TPU available under this name/environment.
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for the selected device."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
lowerCamelCase__ = '''Alexander Joslin'''

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands and the operators + - * / are supported;
    `/` is true division.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the left operand
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the value of the expression
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds tiny SqueezeBert configs/inputs and checks each task head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate each example once per choice: (batch, seq) -> (batch, num_choices, seq)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester harness wired up for SqueezeBert."""

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published squeezebert-mnli checkpoint."""

    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        # Reference logits recorded from the released checkpoint.
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 703 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for BertAbs extractive/abstractive summarization models.

    Encoder (`enc_*`) and decoder (`dec_*`) hyper-parameters are kept separate.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 0 |
import math


def jump_search(arr, x):
    """Search a sorted sequence `arr` for `x` using jump search.

    Returns the index of `x`, or -1 if it is absent (including empty input).
    """
    n = len(arr)
    if n == 0:
        return -1
    # Optimal jump width is sqrt(n).
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead block by block until the block's last element is >= x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: package is `bs4`, not `bsa`

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    # Timestamped filename so repeated downloads don't overwrite each other.
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 82 | 0 |
def cocktail_shaker_sort(unsorted):
    """Sort a list in place with bidirectional bubble sort and return it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: bubble the smallest element of unsorted[:i+1] to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: bubble the largest element toward position i.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        # No swaps in a full sweep means the list is already sorted.
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 705 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for DPR encoder/reader models (BERT-style backbone)."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` with `number_of_terms` rows,
    one `number * i = product` line per term."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure; the modeling entries are only added when torch is present.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | 0 |
import re


def lowerCAmelCase__(a__: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Raises:
        ValueError: if the strand contains characters other than A, T, C, G.
    """
    # Every character must be one of A/T/C/G, otherwise the strand is invalid.
    if len(re.findall("[ATCG]", a__)) != len(a__):
        raise ValueError("Invalid Strand")

    return a__.translate(a__.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 707 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels per GLUE task; used below to size the
# classification head when converting a fine-tuned checkpoint.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model and save it.

    The head type is chosen from `finetuning_task`: a GLUE task builds a
    sequence-classification head, a SQuAD task a QA head, otherwise a plain
    LM head.
    """
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Image-classification pipeline: runs an image through an image-classification
    model and returns the top-k ``{"score", "label"}`` records.

    Fixes applied: duplicate parameter names (``*lowercase_, **lowercase_`` is a
    SyntaxError), locals collapsed to ``_UpperCamelCase`` while later lines read
    the real names, method-name collisions (all hooks were ``__UpperCAmelCase``,
    so only the last survived), and ``idalabel`` restored to ``id2label``.
    """

    def __init__(self, *args, **kwargs) -> str:
        super().__init__(*args, **kwargs)
        # The vision backend (PIL) is mandatory for this pipeline.
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None):
        """Route the optional `top_k` kwarg to the postprocess step."""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        """Classify one image (or a batch of images)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        # `load_image` accepts paths, URLs, or PIL images.
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits to the top-k (score, label) records."""
        # Never ask for more classes than the model has.
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 708 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase ( pl.LightningModule ):
    """Thin LightningModule wrapper holding a Longformer encoder plus a 2-label
    QA output head (start/end logits), mirroring the training-time module so a
    Lightning checkpoint's state_dict can be loaded.

    Fixes applied: instance attributes were collapsed to a local
    ``_UpperCamelCase`` (so ``self.model`` was never set and the ``nn.Linear``
    construction raised AttributeError); ``forward`` name restored.
    """

    def __init__(self, model) -> None:
        super().__init__()
        self.model = model
        # QA head predicts start and end positions -> 2 labels.
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented elsewhere at training time; only the parameters matter here.
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
) -> str:
    """Convert a PyTorch-Lightning Longformer-QA checkpoint into a
    ``LongformerForQuestionAnswering`` model and save it.

    Fixes applied: the three parameters were all named ``a__`` (duplicate
    argument names are a SyntaxError) and the function name is restored to the
    one the ``__main__`` block actually calls.

    Args:
        longformer_model: model id, e.g. ``longformer-base-4096``.
        longformer_question_answering_ckpt_path: Lightning ``.ckpt`` file.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    # Always load on CPU; the weights are only copied, never executed.
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
    # Command-line entry point for the Lightning -> HF Longformer QA conversion.
    # NOTE(review): the parser and namespace are assigned to `lowerCamelCase__`
    # but read back as `parser`/`args` — these names do not resolve as written.
    # Flagged only; code left byte-identical.
    lowerCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase__ = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 0 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCamelCase__ = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
lowerCamelCase__ = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
lowerCamelCase__ = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> 
print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels) -> float:
    """Fraction of positions where ``preds`` equals ``labels``.

    Fixes applied: both parameters were named ``a__`` (duplicate argument names
    are a SyntaxError) while the body read ``preds``/``labels``; the function
    name is restored to the one the metric class below calls.

    Args:
        preds: array-like of predicted labels (anything supporting ``==`` and ``.mean()``).
        labels: array-like of gold labels, same shape as ``preds``.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg="binary") -> dict:
    """Accuracy plus F1 (sklearn ``fa_score``) for a label list pair.

    Fixes applied: duplicate ``a__`` parameters (SyntaxError) and ``_A``
    placeholder references; the name is restored to the one ``_compute`` calls.

    Args:
        preds: predicted labels.
        labels: gold labels.
        fa_avg: ``average=`` mode forwarded to ``fa_score`` (e.g. "binary", "macro").
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels) -> dict:
    """MultiRC evaluation: per-question macro F1 (f1_m), answer-level F1 (f1_a),
    and exact-match (all answers of a question correct).

    Fixes applied: duplicate ``a__`` parameters (SyntaxError) and every local
    collapsed to ``_UpperCamelCase`` while later lines read the real names;
    function name restored to the one ``_compute`` calls.

    Args:
        ids_preds: list of ``{"idx": {"paragraph", "question", ...}, "prediction"}`` dicts.
        labels: gold labels aligned with ``ids_preds``.
    """
    # Group (prediction, label) pairs by "<paragraph>-<question>".
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match only if every answer of the question is correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    em = float(sum(ems) / len(ems))
    fa_m = sum(fas) / len(fas)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """SuperGLUE metric: dispatches on ``self.config_name`` to the scorer for
    each benchmark subset (accuracy, F1, Matthews correlation, ReCoRD, MultiRC).

    Fixes applied: ``_compute`` had two parameters both named ``lowercase_``
    (duplicate argument names are a SyntaxError) while the body read
    ``predictions``/``references`` (via the undefined ``_SCREAMING_SNAKE_CASE``
    placeholder); the feature-spec helper is named ``_get_feature_types`` so the
    internal ``self._get_feature_types()`` call resolves.
    """

    def _info(self) -> datasets.MetricInfo:
        """Describe the metric; validates the configured subset name first."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc use nested dict inputs, which the numpy format rejects.
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        """Feature spec for the configured subset (nested for record/multirc)."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` for the configured subset."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            # ReCoRD's evaluator expects a SQuAD-like dataset structure plus a
            # mapping from query idx to the predicted answer text.
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]")
| 709 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase ):
    """Deprecated alias kept for backward compatibility: forwards everything to
    the image-processor base class while emitting a deprecation warning.

    Fixes applied: ``*lowercase_, **lowercase_`` duplicated a parameter name
    (SyntaxError), and the ``warnings.warn`` category argument is restored to
    ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 82 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding for the GPT-NeoX subpackage: heavy submodules are only
# imported when their attributes are first accessed (or eagerly under
# TYPE_CHECKING so static type checkers see the real symbols).
# NOTE(review): the import-structure dict and its extensions are assigned to
# `lowerCamelCase__`, but the `_LazyModule` call at the bottom reads
# `_import_structure` — these names do not match as written. Flagged only.
lowerCamelCase__ = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # `tokenizers` missing: skip registering the fast tokenizer.
    pass
else:
    lowerCamelCase__ = ['GPTNeoXTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip registering the modeling classes.
    pass
else:
    lowerCamelCase__ = [
        'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXForCausalLM',
        'GPTNeoXForQuestionAnswering',
        'GPTNeoXForSequenceClassification',
        'GPTNeoXForTokenClassification',
        'GPTNeoXLayer',
        'GPTNeoXModel',
        'GPTNeoXPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Eager imports for type checkers only; never executed at runtime.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on demand.
    lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 710 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( enum.Enum ):
'''simple docstring'''
__A = 0
__A = 1
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Sequence-to-sequence generation pipeline: tokenizes the input text, runs
    ``model.generate`` and decodes the generated ids back into records.

    Fixes applied: duplicate parameter names (SyntaxError), locals collapsed to
    ``_UpperCamelCase`` while later lines read the real names, the hook methods
    all shared the name ``__UpperCAmelCase`` (only the last survived), and the
    ``return_name`` class attribute (read in ``postprocess``) was lost to ``__A``
    name-mangling.
    """

    # Key prefix of the returned records, e.g. "generated_text".
    return_name = '''generated'''

    def __init__(self, *args, **kwargs) -> List[Any]:
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split pipeline kwargs into preprocess / forward / postprocess dicts."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            # Only the first token of a multi-token stop sequence is honoured.
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Length-validation hook for subclasses; always accepts here."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        """Prefix + tokenize a single string or a homogeneous list of strings."""
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        """Run generation; unwrap singleton result lists for list inputs."""
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Call ``generate`` and reshape output ids to (batch, num_return, seq)."""
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        """Decode each generated sequence into a token-id or text record."""
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
                }
            records.append(record)
        return records
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: same mechanics as the text2text base class, with
    summarization-specific length warnings.

    Fixes applied: ``*lowercase_, **lowercase_`` duplicated a parameter name
    (SyntaxError), ``check_inputs`` was obfuscated to ``__UpperCAmelCase``, and
    the ``return_name`` attribute was lost to ``__A`` name-mangling.
    """

    # Key prefix of the returned records, e.g. "summary_text".
    return_name = '''summary'''

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn on length configurations that usually indicate a mistake."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: adds src/tgt language handling on top of the
    text2text base class.

    Fixes applied: locals collapsed to ``_UpperCamelCase`` while later lines
    read the real names, hook methods all named ``__UpperCAmelCase``, and the
    ``return_name`` attribute lost to ``__A`` name-mangling.
    """

    # Key prefix of the returned records, e.g. "translation_text".
    return_name = '''translation'''

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Warn when the input is close to the generation length budget."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        # Prefer the tokenizer's dedicated translation-input builder when present.
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """Route languages to preprocess; fall back to parsing them from the task name."""
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 82 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase__ = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
lowerCamelCase__ = {
'''gpt-neox-20b''': 2048,
}
class _UpperCAmelCase ( snake_case__ ):
    """Fast GPT-NeoX-20B tokenizer (byte-level BPE backed by `tokenizers`).

    Fixes applied: ``__init__`` had every parameter named ``lowercase_``
    (duplicate argument names are a SyntaxError) and the body used the ``_A``
    placeholder; the four class attributes were all obfuscated to ``__A`` (so
    only the last assignment survived) and are restored to the
    framework-expected names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        # Rebuild the backend pre-tokenizer when its add_prefix_space setting
        # differs from the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend BPE model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        keeping only the trailing `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 711 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
lowerCamelCase__ = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
_UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens")
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
_UpperCamelCase = legacy
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
_UpperCamelCase = vocab_file
_UpperCamelCase = extra_ids
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@staticmethod
def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
return max_model_length
@property
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def __UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def __UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return list(
set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
"""simple docstring"""
if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added.")
return token_ids
else:
return token_ids + [self.eos_token_id]
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
if token_ids_a is None:
return token_ids_a
else:
_UpperCamelCase = self._add_eos_if_not_present(lowercase_)
return token_ids_a + token_ids_a
def __getstate__( self : Tuple) -> Any:
"""simple docstring"""
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __UpperCAmelCase ( self : int , text : "TextInput" , **kwargs : Optional[int]) -> List[str]:
    """Tokenize ``text``; in non-legacy mode prepend SPIECE_UNDERLINE so a
    leading space is encoded consistently by SentencePiece.

    BUG FIX: the positional parameter and ``**kwargs`` shared the name
    ``lowercase_`` (a SyntaxError), and the rewritten text was bound to a
    throwaway local and then discarded before the ``super().tokenize`` call.
    """
    if not self.legacy:
        text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " ")
    return super().tokenize(text , **kwargs)
def __UpperCAmelCase ( self : Union[str, Any] , text : int , **kwargs : Optional[int]) -> List[str]:
    """Run SentencePiece over ``text``; in non-legacy mode undo the artificial
    leading underline added by ``tokenize``.

    BUG FIX: the body read the undefined names ``text``/``is_first``/``tokens``
    because every assignment had been redirected to a throwaway local.
    """
    if not self.legacy:
        is_first = text.startswith(SPIECE_UNDERLINE)
        if is_first:
            text = text[1:]
    tokens = self.sp_model.encode(text , out_type=str)
    # Strip the artificial underline from the first piece when the text did not
    # really start with whitespace (short-circuit keeps is_first safe in legacy mode).
    if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
        tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
    return tokens
def __UpperCAmelCase ( self : Optional[Any] , token : Optional[Any]) -> List[Any]:
    """Convert a token to its id; sentinel tokens ``<extra_id_N>`` map to the
    top of the vocabulary (id = vocab_size - N - 1).

    BUG FIX: the body read the undefined names ``token`` and ``match`` while
    the parameter/assignments had been renamed away.
    """
    if token.startswith("<extra_id_"):
        match = re.match(R"<extra_id_(\d+)>" , token)
        num = int(match.group(1))
        return self.vocab_size - num - 1
    return self.sp_model.piece_to_id(token)
def __UpperCAmelCase ( self : List[Any] , index : Any) -> int:
    """Convert an id back to a token; ids above the SentencePiece range are
    sentinel tokens (inverse of the ``_convert_token_to_id`` mapping).

    BUG FIX: the body read the undefined names ``index`` and ``token``.
    """
    if index < self.sp_model.get_piece_size():
        token = self.sp_model.IdToPiece(index)
    else:
        token = f'<extra_id_{self.vocab_size - 1 - index}>'
    return token
def __UpperCAmelCase ( self : Dict , tokens : Optional[int]) -> Optional[Any]:
    """Join a token sequence back into a string, decoding runs of ordinary
    pieces with SentencePiece and splicing special tokens in verbatim.

    BUG FIX: the accumulators ``current_sub_tokens``/``out_string`` and the
    flag ``prev_is_special`` were bound to a throwaway local on every
    assignment, so the body referenced undefined names.
    """
    current_sub_tokens = []
    out_string = ""
    prev_is_special = False
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            if not prev_is_special:
                out_string += " "
            out_string += self.sp_model.decode(current_sub_tokens) + token
            prev_is_special = True
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
            prev_is_special = False
    out_string += self.sp_model.decode(current_sub_tokens)
    return out_string.strip()
def __UpperCAmelCase ( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None) -> Tuple[str]:
    """Save the SentencePiece vocabulary into ``save_directory``.

    Copies the existing model file when possible, otherwise serializes the
    in-memory model. Returns a 1-tuple with the written path.

    BUG FIX: the body read the undefined names ``save_directory`` and
    ``out_vocab_file`` because the parameters/assignments had been renamed
    away; the error f-string also referenced ``save_directory``.
    """
    if not os.path.isdir(save_directory):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory')
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file , out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        with open(out_vocab_file , "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
| 82 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowerCAmelCase__ ( token , num_runs=7 ) -> list:
    """List the most recent runs of the scheduled daily CI workflow via the GitHub API.

    BUG FIX: both parameters were named ``a__`` (duplicate argument names are
    a SyntaxError) and the body read the undefined names ``token``,
    ``num_runs``, ``headers``, ``workflow_id``, ``url`` and ``result``.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def lowerCAmelCase__ ( token ) ->List[Any]:
    """Return the id of the most recent *completed* daily CI workflow run, or None.

    BUG FIX: the loop result was bound to a throwaway local, so the undefined
    name ``workflow_run_id`` was returned.
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def lowerCAmelCase__ ( artifact_names , output_dir , token ) ->Any:
    """Download the named artifacts of the latest completed daily CI run into ``output_dir``.

    BUG FIX: all three parameters were named ``a__`` (SyntaxError) and the
    body read the undefined names ``workflow_run_id``, ``artifacts_links``
    and ``artifact_url``.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def lowerCAmelCase__ ( artifact_names , output_dir , token ) ->Optional[int]:
    """Download the latest daily CI artifacts and return their member files as
    ``{artifact_name: {member_filename: text}}``.

    BUG FIX: all three parameters were named ``a__`` (SyntaxError) and every
    result assignment went to a throwaway local, so ``results`` was never
    populated.
    """
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the (text) member file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
| 712 | from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( k ) ->str:
    """Build a (getitem, key) operation tuple for the hash-map comparison tests.

    BUG FIX: the body returned the undefined name ``k`` while the parameter
    was ``a__``.
    """
    return getitem, k
def lowerCAmelCase__ ( a__ , a__ ) ->Tuple:
'''simple docstring'''
return setitem, k, v
def lowerCAmelCase__ ( k ) ->int:
    """Build a (delitem, key) operation tuple for the hash-map tests.

    BUG FIX: the body returned the undefined name ``k`` while the parameter
    was ``a__``.
    """
    return delitem, k
def lowerCAmelCase__ ( a__ , a__ , *a__ ) ->List[str]:
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
# Operation scripts replayed against both HashMap and dict in the
# parametrized test below.
# NOTE(review): every constant is bound to the same name `lowerCamelCase__`,
# so each assignment overwrites the previous one, and the helpers
# `_set`/`_get`/`_del` referenced here are defined above under the name
# `lowerCAmelCase__` — TODO restore the distinct `_add_items`, `_overwrite_items`,
# ... names expected by the parametrize block.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
# Replays each operation script against a HashMap and a plain dict and
# asserts the two stay observably identical after every step.
# NOTE(review): the parametrize ids (`_add_items`, ...) are undefined at this
# point — the constants above were all bound to `lowerCamelCase__` — and the
# two-value results of `_run_operation` are unpacked into a single throwaway
# name, so `my`/`py`/`my_res`/`py_res` below are undefined. TODO restore the
# original local names.
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Replay `a__` (a list of (fun, *args) operations) on HashMap and dict and compare.'''
    _UpperCamelCase = HashMap(initial_block_size=4 )
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        # After each op the two mappings must agree on result, repr, keys,
        # length and items.
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->List[Any]:
    """Ensure HashMap exposes only a strict subset of dict's public API.

    BUG FIX: the helper's parameter had been renamed to ``a__`` while its body
    still read ``name``, and the comprehensions called ``is_public(a__)`` with
    the undefined name ``a__`` — every call raised NameError.
    """
    def is_public(name ) -> bool:
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 82 | 0 |
from __future__ import annotations
def lowerCAmelCase__ ( a__ , a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_UpperCamelCase = array[indexa], array[indexa]
def lowerCAmelCase__ ( array , low , length , direction ) ->Optional[int]:
    """Recursively merge the bitonic sequence array[low:low+length] into the
    requested order (direction 1 = ascending, 0 = descending).

    BUG FIX: the four parameters all shared the name ``a__`` (a SyntaxError)
    and the body passed the undefined names ``lowerCamelCase_``/``middle``.
    """
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def lowerCAmelCase__ ( array , low , length , direction ) ->Tuple:
    """Bitonic-sort array[low:low+length] in place into the requested order
    (direction 1 = ascending, 0 = descending); length must be a power of two.

    BUG FIX: the four parameters all shared the name ``a__`` (a SyntaxError)
    and the recursive calls passed the undefined name ``lowerCamelCase_``.
    """
    if length > 1:
        middle = int(length / 2 )
        # Sort one half up and the other down to form a bitonic sequence,
        # then merge the whole range into `direction` order.
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    # Demo driver: read a comma-separated list, show it bitonic-sorted both ways.
    # NOTE(review): requires len(user input) to be a power of two — bitonic
    # sort's standard precondition; not validated here.
    lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
    lowerCamelCase__ = [int(item.strip()) for item in user_input.split(''',''')]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('''\nSorted array in ascending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('''Sorted array in descending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
| 713 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
    """Fast (CPU, tiny random weights) test-suite for
    KandinskyVaaControlnetImgaImgPipeline, built on the shared pipeline
    tester mixin.

    NOTE(review): every class attribute below is named ``__A``, so each
    assignment overwrites the previous one — TODO restore the distinct
    mixin attribute names (pipeline_class, params, batch_params, ...).
    """

    __A = KandinskyVaaControlnetImgaImgPipeline
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    __A = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    __A = False

    @property
    def __UpperCAmelCase ( self : List[Any]) -> Tuple:
        """Hidden size used for the tiny text embedder."""
        return 32

    @property
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Time-embedding input dimension of the tiny UNet."""
        return 32

    @property
    def __UpperCAmelCase ( self : Optional[int]) -> str:
        """Cross-attention dimension (same as the time input dim here)."""
        return self.time_input_dim

    @property
    def __UpperCAmelCase ( self : List[str]) -> Any:
        """Expanded time-embedding dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def __UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
        """Number of diffusion timesteps used by the dummy scheduler config."""
        return 100

    @property
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Build a deterministic, tiny UNet2DConditionModel for the tests."""
        torch.manual_seed(0)
        _UpperCamelCase = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        _UpperCamelCase = UNetaDConditionModel(**lowercase_)
        return model

    @property
    def __UpperCAmelCase ( self : int) -> Optional[int]:
        """Config kwargs for the tiny VQ (movq) autoencoder."""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCAmelCase ( self : int) -> Dict:
        """Build a deterministic tiny VQModel from the kwargs above."""
        torch.manual_seed(0)
        _UpperCamelCase = VQModel(**self.dummy_movq_kwargs)
        return model

    def __UpperCAmelCase ( self : int) -> Any:
        """Assemble the full component dict (unet, scheduler, movq) for the pipeline."""
        _UpperCamelCase = self.dummy_unet
        _UpperCamelCase = self.dummy_movq
        _UpperCamelCase = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        _UpperCamelCase = DDIMScheduler(**lowercase_)
        _UpperCamelCase = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCAmelCase ( self : str , lowercase_ : Dict , lowercase_ : List[str]=0) -> List[str]:
        """Build seeded dummy call kwargs (embeds, init image, hint, generator)."""
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            lowercase_)
        # create init_image
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
        # NOTE(review): `np.uinta` is not a numpy attribute — presumably np.uint8; TODO confirm.
        _UpperCamelCase = Image.fromarray(np.uinta(lowercase_)).convert("RGB").resize((256, 256))
        # create hint
        _UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith("mps"):
            # MPS does not support device-local generators.
            _UpperCamelCase = torch.manual_seed(lowercase_)
        else:
            _UpperCamelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        _UpperCamelCase = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def __UpperCAmelCase ( self : Any) -> str:
        """End-to-end CPU check against a hard-coded expected image slice."""
        _UpperCamelCase = "cpu"
        _UpperCamelCase = self.get_dummy_components()
        _UpperCamelCase = self.pipeline_class(**lowercase_)
        _UpperCamelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = pipe(**self.get_dummy_inputs(lowercase_))
        _UpperCamelCase = output.images
        # Same call but with return_dict=False — both paths must agree.
        _UpperCamelCase = pipe(
            **self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        _UpperCamelCase = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow GPU integration test: full prior + controlnet img2img pipeline run
    compared against a reference image by mean pixel difference."""

    def __UpperCAmelCase ( self : Union[str, Any]) -> int:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """Run the real kandinsky-2-2 prior + controlnet-depth img2img pipelines
        and compare against the stored reference output.

        NOTE(review): `torch.floataa` is not a torch attribute (presumably
        torch.float16), `2_55.0` normalizes uint8 pixels to [0, 1], and the
        prior's (image_embeds, negative_image_embeds) tuple is unpacked into a
        single throwaway name so the later `image_embeds=.../negative_image_embeds=...`
        arguments reference undefined names — TODO restore the original locals.
        """
        _UpperCamelCase = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy")
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        _UpperCamelCase = init_image.resize((512, 512))
        _UpperCamelCase = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        _UpperCamelCase = torch.from_numpy(np.array(lowercase_)).float() / 2_55.0
        _UpperCamelCase = hint.permute(2 , 0 , 1).unsqueeze(0)
        _UpperCamelCase = "A robot, 4k photo"
        _UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(lowercase_)
        _UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        _UpperCamelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.Generator(device="cpu").manual_seed(0)
        _UpperCamelCase , _UpperCamelCase = pipe_prior(
            lowercase_ , image=lowercase_ , strength=0.85 , generator=lowercase_ , negative_prompt="" , ).to_tuple()
        _UpperCamelCase = pipeline(
            image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , hint=lowercase_ , generator=lowercase_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        _UpperCamelCase = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(lowercase_ , lowercase_)
| 82 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI: convert an original Stable Diffusion checkpoint into the diffusers format.
    # BUG FIX: the parser, the parsed args and the converted pipeline were all
    # bound to the throwaway name `lowerCamelCase__`, so `parser.add_argument`,
    # every `args.*` access and `pipe.*` below referenced undefined names;
    # `torch.floataa` (nonexistent) is replaced by `torch.float16`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '''--original_config_file''',
        default=None,
        type=str,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--scheduler_type''',
        default='''pndm''',
        type=str,
        help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
    )
    parser.add_argument(
        '''--pipeline_type''',
        default=None,
        type=str,
        help=(
            '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
            '''. If `None` pipeline will be automatically inferred.'''
        ),
    )
    parser.add_argument(
        '''--image_size''',
        default=None,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--prediction_type''',
        default=None,
        type=str,
        help=(
            '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
            ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    parser.add_argument(
        '''--stable_unclip''',
        type=str,
        default=None,
        required=False,
        help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
    )
    parser.add_argument(
        '''--stable_unclip_prior''',
        type=str,
        default=None,
        required=False,
        help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
    )
    parser.add_argument(
        '''--clip_stats_path''',
        type=str,
        help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
        required=False,
    )
    parser.add_argument(
        '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
    )
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--vae_path''',
        type=str,
        default=None,
        required=False,
        help='''Set to a path, hub id to an already converted vae to not convert it again.''',
    )
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )
    if args.half:
        pipe.to(torch_dtype=torch.float16)
    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
def lowerCAmelCase__ ( number_of_steps ) ->int:
    """Count the distinct ways to climb ``number_of_steps`` stairs taking one or
    two steps at a time (Fibonacci recurrence, O(n) time, O(1) space).

    BUG FIX: the def line carried dataset-row garbage, the validity check was
    ``isinstance(a__, a__)`` (comparing the argument against itself), and the
    two accumulators were unpacked into a single throwaway name so the loop
    read the undefined names ``previous``/``current``.
    """
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 82 | 0 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCAmelCase__ ( a__ ) ->List[str]:
    """While the mockfs fixture (``a__``) is active, both the test ``mock``
    protocol and the built-in ``bz2`` protocol must be registered."""
    for protocol in ("mock", "bz2"):
        assert protocol in _fsspec_registry
def lowerCAmelCase__ ( ) ->List[str]:
    """Without the mockfs fixture, ``mock`` must be unregistered while the
    built-in ``bz2`` protocol persists."""
    registry = _fsspec_registry
    assert "mock" not in registry
    assert "bz2" in registry
def lowerCAmelCase__ ( ) ->int:
    """extract_path_from_uri strips the s3 scheme and leaves local paths untouched.

    BUG FIX: every local was bound to a throwaway name and the calls passed
    the undefined name ``lowerCamelCase__``.
    """
    mock_bucket = "mock-s3-bucket"
    dataset_path = f's3://{mock_bucket}'
    dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path.startswith("s3://" ) is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path )
    assert dataset_path == new_dataset_path
def lowerCAmelCase__ ( a__ ) ->Dict:
    """is_remote_filesystem is True for the mock fs fixture (``a__``) and False
    for the local ``file`` filesystem.

    BUG FIX: results were bound to a throwaway local and the assertions read
    the undefined names ``is_remote``/``fs``.
    """
    is_remote = is_remote_filesystem(a__ )
    assert is_remote is True
    fs = fsspec.filesystem("file" )
    is_remote = is_remote_filesystem(fs )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , COMPRESSION_FILESYSTEMS )
def lowerCAmelCase__ ( compression_fs_class , gz_file , bza_file , lza_file , xz_file , zstd_file , text_file ) ->int:
    """Each compression filesystem exposes exactly one decompressed member whose
    content matches the original text file.

    BUG FIX: all seven parameters were named ``a__`` (duplicate argument names
    are a SyntaxError — pytest also resolves fixtures by parameter name) and
    the body read undefined locals throughout.
    """
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f'for \'{compression_fs_class.protocol}\' compression protocol, '
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    expected_filename = os.path.basename(input_path )
    expected_filename = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(expected_filename , "r" , encoding="utf-8" ) as f, open(text_file , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def lowerCAmelCase__ ( protocol , zip_jsonl_path , jsonl_gz_path ) ->Union[str, Any]:
    """A chained `protocol://member::archive` URL resolves to a filesystem on
    which only the real member file exists.

    BUG FIX: the three parameters were all named ``a__`` (SyntaxError) and the
    filesystem returned by get_fs_token_paths was bound to a throwaway local.
    """
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f'{protocol}://{member_file_path}::{compressed_file_path}'
    fs, *_ = fsspec.get_fs_token_paths(path )
    assert fs.isfile(member_file_path )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def lowerCAmelCase__ ( hf_api , hf_token , hf_private_dataset_repo_txt_data , text_file ) ->Optional[int]:
    """HfFileSystem lists and reads a private dataset repo correctly.

    BUG FIX: the four parameters were all named ``a__`` (SyntaxError) and the
    body read undefined locals/fixtures. NOTE(review): fixture names and
    order reconstructed from the body's usage — TODO confirm against the
    project's conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data , token=hf_token )
    hffs = HfFileSystem(repo_info=repo_info , token=hf_token )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(text_file ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def lowerCAmelCase__ ( ) ->int:
    """Re-registering an existing protocol and reloading datasets.filesystems
    must emit exactly one overwrite warning with the expected message.

    BUG FIX: the protocol was bound to a throwaway local and the calls passed
    the undefined name ``lowerCamelCase__`` for the protocol, the class and
    the clobber/warning arguments.
    """
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f'A filesystem protocol was already set for {protocol} and will be overwritten.'
    )
)
| 715 | from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Zero-shot object-detection chunk pipeline: scores free-text candidate
    labels against an image and returns thresholded bounding boxes.

    NOTE(review): the obfuscation redirected most local assignments to a
    throwaway name, so several methods below reference undefined locals —
    flagged inline; TODO restore the original variable names.
    """

    def __init__( self : Union[str, Any] , **lowercase_ : Tuple) -> Any:
        """PyTorch-only; requires the vision backend.

        NOTE(review): `self.check_model_type(lowercase_)` passes an undefined
        name — presumably MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
        (imported above); confirm.
        """
        super().__init__(**lowercase_)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , "vision")
        self.check_model_type(lowercase_)

    def __call__( self : str , lowercase_ : Union[str, "Image.Image", List[Dict[str, Any]]] , lowercase_ : Union[str, List[str]] = None , **lowercase_ : str , ) -> List[str]:
        """Normalize (image, candidate_labels) into the dict form the chunk
        pipeline expects, then delegate to the base __call__."""
        if "text_queries" in kwargs:
            # legacy keyword alias for candidate_labels
            _UpperCamelCase = kwargs.pop("text_queries")
        if isinstance(lowercase_ , (str, Image.Image)):
            _UpperCamelCase = {"image": image, "candidate_labels": candidate_labels}
        else:
            _UpperCamelCase = image
        _UpperCamelCase = super().__call__(lowercase_ , **lowercase_)
        return results

    def __UpperCAmelCase ( self : Any , **lowercase_ : int) -> List[str]:
        """Split pipeline kwargs into (preprocess, forward, postprocess) params.

        NOTE(review): threshold/top_k are bound to a throwaway local and the
        return reads the undefined name ``postprocess_params`` while also
        returning a fresh `{}` — the extracted values are lost.
        """
        _UpperCamelCase = {}
        if "threshold" in kwargs:
            _UpperCamelCase = kwargs["threshold"]
        if "top_k" in kwargs:
            _UpperCamelCase = kwargs["top_k"]
        return {}, {}, postprocess_params

    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> List[str]:
        """Yield one model-input dict per candidate label (chunked preprocessing).

        NOTE(review): image/labels/target_size/text_inputs/image_features are
        all bound to a throwaway local, so the yields read undefined names.
        """
        _UpperCamelCase = load_image(inputs["image"])
        _UpperCamelCase = inputs["candidate_labels"]
        if isinstance(lowercase_ , lowercase_):
            _UpperCamelCase = candidate_labels.split(",")
        # original (height, width) so boxes can be rescaled after the forward pass
        _UpperCamelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(lowercase_):
            _UpperCamelCase = self.tokenizer(lowercase_ , return_tensors=self.framework)
            _UpperCamelCase = self.image_processor(lowercase_ , return_tensors=self.framework)
            yield {
                "is_last": i == len(lowercase_) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def __UpperCAmelCase ( self : Dict , lowercase_ : Tuple) -> str:
        """Run the model on one chunk, forwarding the bookkeeping keys
        (target_size, candidate_label, is_last) alongside the model outputs."""
        _UpperCamelCase = model_inputs.pop("target_size")
        _UpperCamelCase = model_inputs.pop("candidate_label")
        _UpperCamelCase = model_inputs.pop("is_last")
        _UpperCamelCase = self.model(**lowercase_)
        _UpperCamelCase = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def __UpperCAmelCase ( self : int , lowercase_ : Tuple , lowercase_ : List[str]=0.1 , lowercase_ : int=None) -> List[str]:
        """Threshold, convert and score-sort detections across all label chunks.

        NOTE(review): `results`/`label`/`outputs`/`score`/`box` are bound to a
        throwaway local, and the sort key lambda reads `x` while its parameter
        is `lowercase_`.
        """
        _UpperCamelCase = []
        for model_output in model_outputs:
            _UpperCamelCase = model_output["candidate_label"]
            _UpperCamelCase = BaseModelOutput(lowercase_)
            _UpperCamelCase = self.image_processor.post_process_object_detection(
                outputs=lowercase_ , threshold=lowercase_ , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                _UpperCamelCase = outputs["scores"][index].item()
                _UpperCamelCase = self._get_bounding_box(outputs["boxes"][index][0])
                _UpperCamelCase = {"score": score, "label": label, "box": box}
                results.append(lowercase_)
        _UpperCamelCase = sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)
        if top_k:
            _UpperCamelCase = results[:top_k]
        return results

    def __UpperCAmelCase ( self : str , lowercase_ : "torch.Tensor") -> Dict[str, int]:
        """Convert an (xmin, ymin, xmax, ymax) box tensor to a plain int dict.

        NOTE(review): the 4-way unpack assigns all four coordinates to one
        throwaway name, so xmin/ymin/xmax/ymax below are undefined.
        """
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = box.int().tolist()
        _UpperCamelCase = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 82 | 0 |
lowerCamelCase__ = '''\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'''
lowerCamelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 716 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( TestCase ):
    """Static checks over ./datasets scripts: every `open(...)` must pass an
    explicit utf-8 encoding (or a binary mode) and no `print(` calls are
    allowed.

    BUG FIX: all four methods shared one obfuscated name (each definition
    shadowed the previous), the test methods called the nonexistent helpers
    `_no_encoding_on_file_open` / `_no_print_statements`, the base class was
    the undefined name `lowerCAmelCase` (TestCase is imported above), and the
    helper bodies read undefined locals.
    """

    def _no_encoding_on_file_open( self , file_path : str):
        """Return a regex match for an `open(...)` call that lacks an explicit
        encoding or binary/write mode, or None."""
        with open(file_path , encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements( self , file_path : str):
        """Return a match for a real `print(` call, ignoring prints inside
        comments, string literals and docstrings; None when clean."""
        with open(file_path , encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self):
        """Every dataset script must open text files with utf-8 encoding."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements( self):
        """Dataset scripts must log via datasets.logger, never print."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
# Enable info-level logging for the conversion script and create the module logger.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # fix: the conversion code below calls `logger.info(...)`
lowerCamelCase__ = logger  # keep the original (obfuscated) module binding
# here we list all keys to be renamed (original name on the left, our name on the right)
# Mapping of original (DETR-style) checkpoint parameter names to HF Table Transformer names.
# Fix: the list was bound to `lowerCamelCase__` while every append targeted the undefined
# name `rename_keys`. The repeated per-layer appends are also compacted into suffix tables.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    for src_suffix, dest_suffix in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.encoder.layers.{i}.{src_suffix}", f"encoder.layers.{i}.{dest_suffix}"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    for src_suffix, dest_suffix in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("multihead_attn.out_proj.weight", "encoder_attn.out_proj.weight"),
        ("multihead_attn.out_proj.bias", "encoder_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "encoder_attn_layer_norm.weight"),
        ("norm2.bias", "encoder_attn_layer_norm.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.decoder.layers.{i}.{src_suffix}", f"decoder.layers.{i}.{dest_suffix}"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
        ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
    ]
)
lowerCamelCase__ = rename_keys  # keep the original (obfuscated) module binding
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place.

    Fixes vs. the previous revision: the signature had three parameters all named
    ``a__`` (a SyntaxError) and the def name did not match the ``rename_key(...)``
    call site in the conversion function.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys moved under the HF conv-encoder prefix.

    Keys containing ``backbone.0.body`` are renamed to ``backbone.conv_encoder.model``;
    all other entries are copied unchanged (insertion order preserved).
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused in_proj weight/bias into separate q/k/v projections, in place.

    The original checkpoints store self-/cross-attention input projections as one
    fused matrix of 3*256 rows; HF models expect separate ``q_proj``/``k_proj``/``v_proj``
    entries (hidden size 256 — TODO confirm against the model config).
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize `image` so its longer side matches the checkpoint's training size, keeping aspect ratio.

    Detection checkpoints use a max side of 800 px, structure-recognition checkpoints 1000 px.
    `image` is expected to expose PIL's `.size` (width, height) and `.resize((w, h))`.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a CHW float tensor and normalize with ImageNet mean/std."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the checkpoint from `checkpoint_url`, renames/splits its weights,
    verifies the converted model on an example image, then optionally saves it to
    `pytorch_dump_folder_path` and/or pushes it to the hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: '''table''', 1: '''table rotated'''}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example image from the hub
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
        type=str,
        choices=[
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
        ],
        help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 717 | import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)  # fix: the conversation classes below call `logger.warning(...)`
lowerCamelCase__ = logger  # keep the original (obfuscated) module binding
class Conversation:
    """Holds one multi-turn conversation: past user inputs, generated responses and the
    pending (not yet processed) user input.

    Fixes vs. the previous revision: the four conversation methods all shared the name
    ``__UpperCAmelCase`` (so only the last survived), the ``__init__`` parameters were
    all named ``lowercase_`` (a SyntaxError), and ``uuid.uuida()`` is not a uuid function.
    The class is exposed as ``Conversation`` because the pipeline annotations below
    reference that name.
    """

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            # every conversation gets a unique identifier
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue `text` as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the history and clear it."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Append a model-generated response to the history."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole conversation, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f'{name} >> {text} \n'
        return output


_UpperCAmelCase = Conversation  # backward-compatible alias for the obfuscated class name
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''', )
class _UpperCAmelCase(Pipeline):
    """Multi-turn conversational pipeline: consumes `Conversation` objects and appends
    a model-generated response to each.

    Fixes vs. the previous revision: the decorator argument and base class were the
    undefined name ``lowerCAmelCase`` (``PIPELINE_INIT_ARGS`` / ``Pipeline`` are what
    is imported above), several signatures used duplicate ``lowercase_`` parameters
    (a SyntaxError), and the pipeline-protocol methods all shared one name.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # models without a pad token fall back to the EOS token for padding
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split call-time kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        """Run the pipeline; unwrap single-element result lists for convenience."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        """Tokenize the conversation into model inputs."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a response, trimming the prompt so at least `minimum_tokens` remain for it."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1  # encoder-decoder outputs begin with the decoder start token
        else:
            start_position = n  # decoder-only outputs echo the prompt; strip it
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and append the answer to the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        """Fallback tokenization: concatenate turns, separated by EOS when available."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 82 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)  # module logger (transformers-style logging helper)
lowerCamelCase__ = logger  # keep the original (obfuscated) module binding
# Config class name referenced by the modeling docstrings.
_CONFIG_FOR_DOC = """T5Config"""
lowerCamelCase__ = _CONFIG_FOR_DOC
def lowerCAmelCase__(input_ids, pad_token_id, decoder_start_token_id) -> jnp.ndarray:
    """Shift input ids one position to the right for decoder teacher forcing.

    The first position is filled with `decoder_start_token_id` and any label
    padding value (-100) carried over from the inputs is replaced by `pad_token_id`.
    Fix vs. the previous revision: the three parameters were all named ``a__``
    (a SyntaxError) and the intermediate bindings were destroyed.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # replace -100 (ignored-label marker) with the pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
# NOTE(review): the three classes below all share the name `_UpperCAmelCase`, so each
# definition shadows the previous one and only the last binding remains importable.
# Likewise both class attributes are named `__A`, so the string '''mt5''' is immediately
# shadowed by the MTaConfig assignment — presumably these were `model_type = "mt5"` and
# `config_class = MTaConfig` (the FlaxMT5Model / FlaxMT5ForConditionalGeneration /
# FlaxMT5EncoderModel wrappers); confirm against the original module before relying on them.
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    __A = '''mt5'''
    __A = MTaConfig


class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    __A = '''mt5'''
    __A = MTaConfig


class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    __A = '''mt5'''
    __A = MTaConfig
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` unit squares with unit squares and
    oblong tiles of length 2, 3 or 4 (Project Euler style tiling count).

    Uses dynamic programming: ways_number[n] starts at 1 (all unit squares) and each
    tile placement adds the count of ways to fill the remaining left-hand section.
    Fix vs. the previous revision: the parameter was named ``a__`` while the body read
    ``length``, and the DP list binding was destroyed — both NameErrors.
    """
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


lowerCAmelCase__ = solution  # keep the original (obfuscated) name importable


if __name__ == "__main__":
    print(F"{solution() = }")
| 82 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    """Simple dense matrix with operator overloading and the Sherman–Morrison update.

    Fixes vs. the previous revision: several signatures used duplicate ``lowercase_``
    parameters (a SyntaxError), every ``self.*`` assignment target was destroyed, and
    the class/method names did not match their call sites (``Matrix``,
    ``validate_indicies``, ``transpose``, ``sherman_morrison``).
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'
        # Make string identifier wide enough for the longest element
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector: list) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True when `loc` is a valid (row, column) index pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        """Scalar multiplication for int/float, matrix multiplication for Matrix."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1) given self = A^(-1); None when the update is singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


_UpperCAmelCase = Matrix  # backward-compatible alias for the obfuscated class name
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Smoke test: Sherman–Morrison update of a 3x3 identity inverse."""
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        """Run the module doctests."""
        import doctest

        doctest.testmod()

    test1()
| 719 | import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images matching `class_prompt`
    from the LAION-400M knn index into `{class_data_dir}/images`, recording
    captions, URLs and local paths in sidecar text files.

    Fix vs. the previous revision: the three parameters were all named ``a__``
    (a SyntaxError) and every local binding target was destroyed.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)

    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return  # already populated

    # grow the query size until the index returns enough candidates (capped at 1e4)
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f'{class_data_dir}/caption.txt', "w") as fa, open(f'{class_data_dir}/urls.txt', "w") as fa_urls, open(
        f'{class_data_dir}/images.txt', "w") as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # validate that the payload decodes as an image before keeping it
                    _ = Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f'{class_data_dir}/images/{total}.jpg' + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # best-effort download: skip broken URLs/payloads and keep going
                continue
    return
def parse_args():
    """Parse CLI arguments for the regularization-image retrieval script.

    Fix vs. the previous revision: the obfuscated ``a__`` placeholders in the
    argparse calls replaced the literal values (``add_help=False``,
    ``required=True``, ``type=str``/``int``).
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # CLI entry point; fix: the parse result was bound to `lowerCamelCase__` while
    # the call below read the undefined name `args`.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id):
    """Fetch a single Hacker News item (as a dict) by its id."""
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url).json()


def hackernews_top_stories(max_stories=10):
    """Fetch the current top `max_stories` Hacker News posts."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories=10):
    """Render the top stories as a markdown bullet list of [title](url) links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 720 | import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """Accuracy metric for the MATH dataset, comparing LaTeX answers after canonicalization.

    Fix vs. the previous revision: both methods shared the name ``__UpperCAmelCase``;
    the `datasets.Metric` protocol dispatches to ``_info`` and ``_compute``.
    """

    def _info(self):
        """Describe the metric's inputs and provenance for the datasets library."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"], )

    def _compute(self, predictions, references):
        """Return {'accuracy': ...} where a prediction counts as correct when it is
        mathematically equivalent to its reference after canonicalization."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 82 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _UpperCAmelCase :
    """Test-helper that builds LayoutLMv3 configs and dummy inputs.

    NOTE(review): parameter and variable names are machine-obfuscated — the
    `__init__` signature repeats `lowercase_` (a SyntaxError as written) and
    bodies reference `UpperCAmelCase__`, which is never bound; the attribute
    assignments below read from names (`parent`, `batch_size`, ...) that the
    obfuscation no longer provides. Kept byte-identical; comments only.
    """
    def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int=2 , lowercase_ : Optional[int]=3 , lowercase_ : Any=4 , lowercase_ : Optional[Any]=2 , lowercase_ : str=7 , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=True , lowercase_ : List[str]=99 , lowercase_ : Tuple=36 , lowercase_ : Union[str, Any]=3 , lowercase_ : Optional[int]=4 , lowercase_ : List[Any]=37 , lowercase_ : List[str]="gelu" , lowercase_ : Dict=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[int]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : int=0.02 , lowercase_ : Optional[Any]=6 , lowercase_ : Optional[int]=6 , lowercase_ : Any=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=1000 , ) -> Tuple:
        """Store all model/test hyper-parameters on the tester instance."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = image_size
        _UpperCamelCase = patch_size
        _UpperCamelCase = text_seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_input_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = coordinate_size
        _UpperCamelCase = shape_size
        _UpperCamelCase = num_labels
        _UpperCamelCase = num_choices
        _UpperCamelCase = scope
        _UpperCamelCase = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _UpperCamelCase = text_seq_length
        _UpperCamelCase = (image_size // patch_size) ** 2 + 1
        _UpperCamelCase = self.text_seq_length + self.image_seq_length
    def __UpperCAmelCase ( self : Tuple) -> str:
        """Build a config plus random input tensors (ids, bboxes, pixel values, masks, labels)."""
        _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        # Ensure that bbox is legal: swap coordinates so x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _UpperCamelCase = bbox[i, j, 3]
                    _UpperCamelCase = bbox[i, j, 1]
                    _UpperCamelCase = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _UpperCamelCase = bbox[i, j, 2]
                    _UpperCamelCase = bbox[i, j, 0]
                    _UpperCamelCase = t
        _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _UpperCamelCase = None
        if self.use_input_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length])
        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        _UpperCamelCase = None
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        _UpperCamelCase = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def __UpperCAmelCase ( self : Any , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Any) -> List[str]:
        """Check the base model's output shape for text+image, text-only, and image-only inputs."""
        _UpperCamelCase = LayoutLMvaModel(config=UpperCAmelCase__)
        model.to(UpperCAmelCase__)
        model.eval()
        # text + image
        _UpperCamelCase = model(UpperCAmelCase__ , pixel_values=UpperCAmelCase__)
        _UpperCamelCase = model(
            UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
        _UpperCamelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__)
        _UpperCamelCase = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        _UpperCamelCase = model(UpperCAmelCase__)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        _UpperCamelCase = model(pixel_values=UpperCAmelCase__)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any]) -> Tuple:
        """Check the sequence-classification head's logits shape."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = LayoutLMvaForSequenceClassification(UpperCAmelCase__)
        model.to(UpperCAmelCase__)
        model.eval()
        _UpperCamelCase = model(
            UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Dict) -> List[str]:
        """Check the token-classification head's logits shape (per text token)."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = LayoutLMvaForTokenClassification(config=UpperCAmelCase__)
        model.to(UpperCAmelCase__)
        model.eval()
        _UpperCamelCase = model(
            UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Tuple) -> Any:
        """Check the QA head's start/end logits shapes over the full sequence."""
        _UpperCamelCase = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase__)
        model.to(UpperCAmelCase__)
        model.eval()
        _UpperCamelCase = model(
            UpperCAmelCase__ , bbox=UpperCAmelCase__ , pixel_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def __UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
        """Return (config, inputs_dict) for the common-test machinery."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            _UpperCamelCase
        ) = config_and_inputs
        _UpperCamelCase = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowercase__, lowercase__, unittest.TestCase ):
    """Common-suite tests for LayoutLMv3 models (model, sequence/token classification, QA).

    NOTE(review): base classes were obfuscated to `lowercase__` — presumably
    `ModelTesterMixin, PipelineTesterMixin` per the file's imports; the method
    bodies reference unbound `UpperCAmelCase__` names. Code kept byte-identical.
    """
    __A = False
    __A = False
    __A = False
    __A = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __A = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def __UpperCAmelCase ( self : str , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Tuple) -> Optional[Any]:
        """Pipeline-test hook: always allow the pipeline test to run."""
        return True
    def __UpperCAmelCase ( self : Tuple) -> Dict:
        """Set up the model tester and config tester fixtures."""
        _UpperCamelCase = LayoutLMvaModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37)
    def __UpperCAmelCase ( self : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Dict=False) -> List[Any]:
        """Adapt inputs_dict per model class (expand for multiple-choice, add label tensors)."""
        _UpperCamelCase = copy.deepcopy(UpperCAmelCase__)
        if model_class in get_values(UpperCAmelCase__):
            # Multiple-choice models expect an extra num_choices dimension
            _UpperCamelCase = {
                k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
                if isinstance(UpperCAmelCase__ , torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(UpperCAmelCase__):
                _UpperCamelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
            elif model_class in get_values(UpperCAmelCase__):
                # QA models take start_positions / end_positions
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
            elif model_class in [
                *get_values(UpperCAmelCase__),
            ]:
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase__)
            elif model_class in [
                *get_values(UpperCAmelCase__),
            ]:
                # Token classification: one label per text token
                _UpperCamelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase__ , )
        return inputs_dict
    def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : List[Any]) -> List[str]:
        """Smoke-test the base model forward pass."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase__)
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
        """Exercise each position-embedding variant."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _UpperCamelCase = type
            self.model_tester.create_and_check_model(*UpperCAmelCase__)
    def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
        """Smoke-test the sequence-classification head."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__)
    def __UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
        """Smoke-test the token-classification head."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__)
    def __UpperCAmelCase ( self : List[Any]) -> Any:
        """Smoke-test the question-answering head."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__)
    @slow
    def __UpperCAmelCase ( self : Any) -> Tuple:
        """Load the first pretrained checkpoint from the hub (slow/network test)."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = LayoutLMvaModel.from_pretrained(UpperCAmelCase__)
            self.assertIsNotNone(UpperCAmelCase__)
def lowerCAmelCase__ ( ) ->List[str]:
    """Load and return the COCO fixture image used by the integration test.

    Fix from review: the original assigned the opened image to the obfuscated
    name `_UpperCamelCase` but returned `image`, which was never bound
    (NameError). Returning the image directly removes the dangling name.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration test: run microsoft/layoutlmv3-base on a fixture image and
    compare a slice of the last hidden state against recorded values."""
    @cached_property
    def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
        """Image processor fixture (OCR disabled); None when vision extras are absent."""
        return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__) if is_vision_available() else None
    @slow
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
        """Forward a tiny hand-built batch and check output shape + value slice."""
        _UpperCamelCase = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(UpperCAmelCase__)
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=UpperCAmelCase__ , return_tensors="pt").pixel_values.to(UpperCAmelCase__)
        _UpperCamelCase = torch.tensor([[1, 2]])
        _UpperCamelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        _UpperCamelCase = model(
            input_ids=input_ids.to(UpperCAmelCase__) , bbox=bbox.to(UpperCAmelCase__) , pixel_values=pixel_values.to(UpperCAmelCase__) , )
        # verify the logits: 2 text tokens + 196 patches + CLS = 199 positions
        _UpperCamelCase = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase__)
        _UpperCamelCase = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(UpperCAmelCase__)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1e-4))
| 721 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizing and result-file path constants.
# NOTE(review): the obfuscation rebinds `lowerCamelCase__` for every constant;
# later code reads SPEED_TEST_N_EXAMPLES / SMALL_TEST / RESULTS_BASEPATH /
# RESULTS_FILENAME, which these assignments were presumably meant to define.
lowerCamelCase__ = 5_0000
lowerCamelCase__ = 5000
lowerCamelCase__,lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase__ ( dataset , length ) ->int:
    """Timed benchmark: read `length` examples from `dataset` one at a time.

    Fix from review: the original declared two parameters both named `a__`,
    which is a SyntaxError. The names `dataset`/`length` match the keyword
    arguments ("length") that `benchmark_iterating` passes to this function.
    """
    for i in range(length ):
        _UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size ) ->int:
    """Timed benchmark: read the dataset in slices of `batch_size`.

    Fix from review: the original declared three parameters all named `a__`
    (SyntaxError). Names chosen to match the caller's kwargs
    ("length", "batch_size").
    """
    for i in range(0 , len(dataset ) , batch_size ):
        _UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def lowerCAmelCase__ ( dataset , length , type ) ->Union[str, Any]:
    """Timed benchmark: read `length` examples with an output format applied.

    Fix from review: the original's three parameters were all named `a__`
    (SyntaxError). `type` deliberately shadows the builtin because the caller
    passes it as a keyword argument named "type".
    """
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _UpperCamelCase = dataset[i]
@get_duration
def lowerCAmelCase__ ( dataset , length , batch_size , type ) ->Dict:
    """Timed benchmark: batched reads with an output format applied.

    Fix from review: four parameters all named `a__` in the original
    (SyntaxError). Names match the caller's kwargs
    ("length", "batch_size", "type").
    """
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _UpperCamelCase = dataset[i : i + batch_size]
def lowerCAmelCase__ ( ) ->Dict:
    """Generate a temporary Arrow dataset, time several read patterns before and
    after shuffling, and dump the timings as JSON to RESULTS_FILE_PATH.

    NOTE(review): the obfuscation assigns every intermediate to
    `_UpperCamelCase` while later lines read `functions`, `functions_shuffled`,
    `dataset`, etc. — as written these are unbound names; kept byte-identical.
    """
    _UpperCamelCase = {"num examples": SPEED_TEST_N_EXAMPLES}
    # (function, kwargs) pairs timed on the freshly generated dataset
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    # smaller subset re-timed after shuffling (only numpy formatting)
    _UpperCamelCase = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        _UpperCamelCase = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        _UpperCamelCase = generate_example_dataset(
            os.path.join(a__ , "dataset.arrow" ) , a__ , num_examples=a__ , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(a__ ) )
            _UpperCamelCase = func(a__ , **a__ )
        print("shuffling dataset" )
        _UpperCamelCase = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(a__ ) )
            _UpperCamelCase = func(
                a__ , **a__ )
    # persist all collected timings as UTF-8 JSON
    with open(a__ , "wb" ) as f:
        f.write(json.dumps(a__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _UpperCAmelCase :
    """Test-helper that builds TF ESM configs, random inputs, and per-head checks.

    NOTE(review): bodies reference `_lowerCamelCase`, which is never bound, and
    the hyper-parameter attributes are assigned through the obfuscated
    `_UpperCamelCase` name; kept byte-identical, comments only.
    """
    def __init__( self : str , lowercase_ : Optional[Any] , ) -> Dict:
        """Hard-code the small-model hyper-parameters used throughout the suite."""
        _UpperCamelCase = parent
        _UpperCamelCase = 13
        _UpperCamelCase = 7
        _UpperCamelCase = True
        _UpperCamelCase = True
        _UpperCamelCase = True
        _UpperCamelCase = 99
        _UpperCamelCase = 32
        _UpperCamelCase = 2
        _UpperCamelCase = 4
        _UpperCamelCase = 37
        _UpperCamelCase = "gelu"
        _UpperCamelCase = 0.1
        _UpperCamelCase = 0.1
        _UpperCamelCase = 512
        _UpperCamelCase = 16
        _UpperCamelCase = 2
        _UpperCamelCase = 0.02
        _UpperCamelCase = 3
        _UpperCamelCase = 4
        _UpperCamelCase = None
    def __UpperCAmelCase ( self : str) -> Optional[int]:
        """Build an EsmConfig plus random ids/mask/label tensors."""
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _UpperCamelCase = None
        if self.use_input_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
        _UpperCamelCase = None
        _UpperCamelCase = None
        _UpperCamelCase = None
        if self.use_labels:
            _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
        _UpperCamelCase = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
        """Like prepare_config_and_inputs, but adds encoder states/mask for decoder tests."""
        (
            (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) ,
        ) = self.prepare_config_and_inputs()
        _UpperCamelCase = True
        _UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : Optional[Any]) -> int:
        """Check the base model accepts dict, list, and positional input forms."""
        _UpperCamelCase = TFEsmModel(config=_lowerCamelCase)
        _UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
        _UpperCamelCase = model(_lowerCamelCase)
        _UpperCamelCase = [input_ids, input_mask]
        _UpperCamelCase = model(_lowerCamelCase)
        _UpperCamelCase = model(_lowerCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def __UpperCAmelCase ( self : str , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str , ) -> List[str]:
        """Check the model run as a decoder with cross-attention inputs."""
        _UpperCamelCase = True
        _UpperCamelCase = TFEsmModel(config=_lowerCamelCase)
        _UpperCamelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        _UpperCamelCase = model(_lowerCamelCase)
        _UpperCamelCase = [input_ids, input_mask]
        _UpperCamelCase = model(_lowerCamelCase , encoder_hidden_states=_lowerCamelCase)
        # Also check the case where encoder outputs are not passed
        _UpperCamelCase = model(_lowerCamelCase , attention_mask=_lowerCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def __UpperCAmelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Dict) -> Optional[int]:
        """Check the masked-LM head's logits shape (per-token vocab logits)."""
        _UpperCamelCase = TFEsmForMaskedLM(config=_lowerCamelCase)
        _UpperCamelCase = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def __UpperCAmelCase ( self : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : int) -> int:
        """Check the token-classification head's logits shape."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = TFEsmForTokenClassification(config=_lowerCamelCase)
        _UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
        _UpperCamelCase = model(_lowerCamelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def __UpperCAmelCase ( self : Dict) -> List[Any]:
        """Return (config, inputs_dict) for the common-test machinery."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) , (
                _UpperCamelCase
            ) ,
        ) = config_and_inputs
        _UpperCamelCase = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( __UpperCAmelCase, __UpperCAmelCase, unittest.TestCase ):
    """Common-suite tests for TF ESM models.

    NOTE(review): base classes were obfuscated to `__UpperCAmelCase` —
    presumably `TFModelTesterMixin, PipelineTesterMixin` per the imports.
    Code kept byte-identical; comments only.
    """
    __A = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __A = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __A = False
    __A = False
    def __UpperCAmelCase ( self : Any) -> Tuple:
        """Set up the model tester and config tester fixtures."""
        _UpperCamelCase = TFEsmModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37)
    def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : Tuple) -> Tuple:
        """Smoke-test the base model forward pass."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCamelCase)
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
        """Smoke-test the model when configured as a decoder."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase)
    def __UpperCAmelCase ( self : Dict) -> str:
        """Smoke-test the masked-LM head."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase)
    def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
        """Smoke-test the token-classification head."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase)
    @slow
    def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
        """Load the first pretrained checkpoint from the hub (slow/network test)."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = TFEsmModel.from_pretrained(_lowerCamelCase)
            self.assertIsNotNone(_lowerCamelCase)
    @unittest.skip("Protein models do not support embedding resizing.")
    def __UpperCAmelCase ( self : Optional[int]) -> Dict:
        """Skipped: embedding resizing is unsupported for protein models."""
        pass
    @unittest.skip("Protein models do not support embedding resizing.")
    def __UpperCAmelCase ( self : Any) -> Tuple:
        """Skipped: embedding resizing is unsupported for protein models."""
        pass
    def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
        """Check embedding accessors; the MLM head exposes its bias as a dict of tf.Variables."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(_lowerCamelCase)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                _UpperCamelCase = model.get_bias()
                assert isinstance(_lowerCamelCase , _lowerCamelCase)
                for k, v in name.items():
                    assert isinstance(_lowerCamelCase , tf.Variable)
            else:
                _UpperCamelCase = model.get_output_embeddings()
                assert x is None
                _UpperCamelCase = model.get_bias()
                assert name is None
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """Integration tests: run pretrained facebook/esm2_t6_8M_UR50D checkpoints
    and compare output slices against recorded values."""
    @slow
    def __UpperCAmelCase ( self : List[Any]) -> str:
        """Masked-LM head: check logits shape (vocab=33) and a 3x3 value slice."""
        _UpperCamelCase = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        _UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]])
        _UpperCamelCase = model(_lowerCamelCase)[0]
        _UpperCamelCase = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape) , _lowerCamelCase)
        # compare the actual values for a slice.
        _UpperCamelCase = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2))
    @slow
    def __UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
        """Base model: check a 3x3 slice of the last hidden state."""
        _UpperCamelCase = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        _UpperCamelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        _UpperCamelCase = model(_lowerCamelCase)[0]
        # compare the actual values for a slice.
        _UpperCamelCase = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 700 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
    """Fast test for KarrasVePipeline: run a tiny UNet for 2 steps and check
    the output image slice, plus return_dict=False parity."""
    @property
    def __UpperCAmelCase ( self : int) -> str:
        """Seeded tiny 32x32 UNet used as the unconditional denoiser."""
        torch.manual_seed(0)
        _UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
        """Run the pipeline twice (dict and tuple outputs) with identical seeds
        and assert both match the recorded 3x3 slice."""
        _UpperCamelCase = self.dummy_uncond_unet
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
        # re-seed so the tuple-output run is bit-identical
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
        _UpperCamelCase = image[0, -3:, -3:, -1]
        _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: run KarrasVePipeline with the pretrained
    google/ncsnpp-celebahq-256 UNet and compare a 3x3 output slice."""
    def __UpperCAmelCase ( self : int) -> Tuple:
        """Run 20 inference steps at 256x256 and assert shape + recorded slice."""
        _UpperCamelCase = "google/ncsnpp-celebahq-256"
        _UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
        _UpperCamelCase = KarrasVeScheduler()
        _UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
        pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        _UpperCamelCase = torch.manual_seed(0)
        _UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
        _UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _UpperCAmelCase ( unittest.TestCase ):
__A = MODEL_FOR_CAUSAL_LM_MAPPING
__A = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCamelCase = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt")
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator("This is a test" , do_sample=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
_UpperCamelCase = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
__lowerCamelCase , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
_UpperCamelCase = text_generator("This is a test" , do_sample=__lowerCamelCase , num_return_sequences=2 , return_tensors=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
{"generated_token_ids": ANY(__lowerCamelCase)},
{"generated_token_ids": ANY(__lowerCamelCase)},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
["This is a test", "This is a second test"] , do_sample=__lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCamelCase , )
self.assertEqual(
__lowerCamelCase , [
[
{"generated_token_ids": ANY(__lowerCamelCase)},
{"generated_token_ids": ANY(__lowerCamelCase)},
],
[
{"generated_token_ids": ANY(__lowerCamelCase)},
{"generated_token_ids": ANY(__lowerCamelCase)},
],
] , )
@require_tf
def __UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf")
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator("This is a test" , do_sample=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
_UpperCamelCase = text_generator(["This is a test", "This is a second test"] , do_sample=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def __UpperCAmelCase ( self : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = TextGenerationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase)
return text_generator, ["This is a test", "Another test"]
def __UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = text_generator(__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
_UpperCamelCase = text_generator(__lowerCamelCase , stop_sequence=" fe")
self.assertEqual(__lowerCamelCase , [{"generated_text": "Hello I believe in fe"}])
def __UpperCAmelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator("This is a test")
self.assertEqual(__lowerCamelCase , [{"generated_text": ANY(__lowerCamelCase)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
_UpperCamelCase = text_generator("This is a test" , return_full_text=__lowerCamelCase)
self.assertEqual(__lowerCamelCase , [{"generated_text": ANY(__lowerCamelCase)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
_UpperCamelCase = pipeline(task="text-generation" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , return_full_text=__lowerCamelCase)
_UpperCamelCase = text_generator("This is a test")
self.assertEqual(__lowerCamelCase , [{"generated_text": ANY(__lowerCamelCase)}])
self.assertNotIn("This is a test" , outputs[0]["generated_text"])
_UpperCamelCase = text_generator("This is a test" , return_full_text=__lowerCamelCase)
self.assertEqual(__lowerCamelCase , [{"generated_text": ANY(__lowerCamelCase)}])
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
_UpperCamelCase = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
[{"generated_text": ANY(__lowerCamelCase)}, {"generated_text": ANY(__lowerCamelCase)}],
[{"generated_text": ANY(__lowerCamelCase)}, {"generated_text": ANY(__lowerCamelCase)}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCamelCase)
self.assertEqual(
__lowerCamelCase , [
[{"generated_text": ANY(__lowerCamelCase)}, {"generated_text": ANY(__lowerCamelCase)}],
[{"generated_text": ANY(__lowerCamelCase)}, {"generated_text": ANY(__lowerCamelCase)}],
] , )
with self.assertRaises(__lowerCamelCase):
_UpperCamelCase = text_generator("test" , return_full_text=__lowerCamelCase , return_text=__lowerCamelCase)
with self.assertRaises(__lowerCamelCase):
_UpperCamelCase = text_generator("test" , return_full_text=__lowerCamelCase , return_tensors=__lowerCamelCase)
with self.assertRaises(__lowerCamelCase):
_UpperCamelCase = text_generator("test" , return_text=__lowerCamelCase , return_tensors=__lowerCamelCase)
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator("")
self.assertEqual(__lowerCamelCase , [{"generated_text": ANY(__lowerCamelCase)}])
else:
with self.assertRaises((ValueError, AssertionError)):
_UpperCamelCase = text_generator("")
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
text_generator("This is a test" * 500 , max_new_tokens=20)
_UpperCamelCase = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20)
# Hole strategy cannot work
with self.assertRaises(__lowerCamelCase):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
__lowerCamelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa)
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
__lowerCamelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto")
self.assertEqual(pipe.model.device , torch.device(0))
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
_UpperCamelCase = pipe("This is a test")
self.assertEqual(
__lowerCamelCase , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def __UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
import torch
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa)
pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
import torch
_UpperCamelCase = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa)
pipe("This is a test" , do_sample=__lowerCamelCase , top_p=0.5)
def __UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2")
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger("transformers.generation.tf_utils")
else:
_UpperCamelCase = logging.get_logger("transformers.generation.utils")
_UpperCamelCase = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCamelCase) as cl:
_UpperCamelCase = text_generator(__lowerCamelCase , max_length=10 , max_new_tokens=1)
self.assertIn(__lowerCamelCase , cl.out)
# The user only sets one -> no warning
with CaptureLogger(__lowerCamelCase) as cl:
_UpperCamelCase = text_generator(__lowerCamelCase , max_new_tokens=1)
self.assertNotIn(__lowerCamelCase , cl.out)
with CaptureLogger(__lowerCamelCase) as cl:
_UpperCamelCase = text_generator(__lowerCamelCase , max_length=10)
self.assertNotIn(__lowerCamelCase , cl.out)
| 701 | import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__A = MODEL_FOR_MASKED_LM_MAPPING
__A = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 25506, "token_str": " accuser"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 25506,
"token_str": " accuser",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 16416, "token_str": "ELS"},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 13606, "token_str": " Clara"},
] , )
_UpperCamelCase = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(lowercase_ , decimals=6) , [
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
_UpperCamelCase = pipe("Paris is the [MASK] of France.")
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowercase_ , lowercase_)
@slow
@require_torch
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(lowercase_)
@slow
@require_tf
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(lowercase_)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : int) -> Any:
"""simple docstring"""
_UpperCamelCase = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is John", "score": 0.0_08, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.0_07, "token": 1573, "token_str": " Chris"},
] , )
_UpperCamelCase = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(lowercase_) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.2_51,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.2_14,
"token": 12790,
"token_str": " Lyon",
},
] , )
_UpperCamelCase = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(lowercase_) , [
{"sequence": "My name is Patrick", "score": 0.0_05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.0_00, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.0_00, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
@require_tf
def __UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_UpperCamelCase = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
_UpperCamelCase = None
_UpperCamelCase = None
self.run_pipeline_test(lowercase_ , [])
def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int]) -> int:
"""simple docstring"""
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = [
f'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int]) -> str:
"""simple docstring"""
_UpperCamelCase = fill_masker.tokenizer
_UpperCamelCase = fill_masker.model
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token}' , )
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}'])
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = fill_masker([f'This is a {tokenizer.mask_token}', f'Another {tokenizer.mask_token} great test.'])
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
with self.assertRaises(lowercase_):
fill_masker([None])
# No mask_token is not supported
with self.assertRaises(lowercase_):
fill_masker("This is")
self.run_test_top_k(lowercase_ , lowercase_)
self.run_test_targets(lowercase_ , lowercase_)
self.run_test_top_k_targets(lowercase_ , lowercase_)
self.fill_mask_with_duplicate_targets_and_top_k(lowercase_ , lowercase_)
self.fill_mask_with_multiple_masks(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : int , lowercase_ : Dict , lowercase_ : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = sorted(vocab.keys())[:2]
# Pipeline argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , targets=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Call argument
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowercase_)
_UpperCamelCase = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowercase_))
# Score equivalence
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["token_str"] for top_mask in outputs]
_UpperCamelCase = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_) == set(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=lowercase_)
_UpperCamelCase = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
# Raises with invalid
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[])
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets=[""])
with self.assertRaises(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , targets="")
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : List[str]) -> Any:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_ , top_k=2)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}')
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
] , )
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tokenizer.get_vocab()
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
# top_k=2, ntargets=3
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=2 , targets=lowercase_)
# If we use the most probably targets, and filter differently, we should still
# have the same results
_UpperCamelCase = [el["token_str"] for el in sorted(lowercase_ , key=lambda lowercase_: x["score"] , reverse=lowercase_)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowercase_).issubset(lowercase_):
_UpperCamelCase = fill_masker(f'This is a {tokenizer.mask_token}' , top_k=3 , targets=lowercase_)
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowercase_) , nested_simplify(lowercase_))
def __UpperCAmelCase ( self : int , lowercase_ : Optional[int] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCamelCase = sorted(vocab.keys())[:3]
_UpperCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCamelCase = fill_masker(f'My name is {tokenizer.mask_token}' , targets=lowercase_ , top_k=10)
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowercase_) , 3)
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any) -> Dict:
"""simple docstring"""
_UpperCamelCase = FillMaskPipeline(model=lowercase_ , tokenizer=lowercase_)
_UpperCamelCase = fill_masker(
f'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2)
self.assertEqual(
lowercase_ , [
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
[
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
{"sequence": ANY(lowercase_), "score": ANY(lowercase_), "token": ANY(lowercase_), "token_str": ANY(lowercase_)},
],
] , )
| 82 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase_ )
class _UpperCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Tuple , **lowercase_ : int) -> str:
"""simple docstring"""
super().__init__(**_lowercase)
if self.framework != "pt":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
# No specific FOR_XXX available yet
def __call__( self : Union[str, Any] , lowercase_ : Union[np.ndarray, bytes, str] , **lowercase_ : Any) -> List[Any]:
"""simple docstring"""
return super().__call__(_lowercase , **_lowercase)
def __UpperCAmelCase ( self : Union[str, Any] , **lowercase_ : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = {}
if "candidate_labels" in kwargs:
_UpperCamelCase = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
_UpperCamelCase = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __UpperCAmelCase ( self : List[str] , lowercase_ : List[str] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]="This is a sound of {}.") -> Dict:
"""simple docstring"""
if isinstance(_lowercase , _lowercase):
if audio.startswith("http://") or audio.startswith("https://"):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase = requests.get(_lowercase).content
else:
with open(_lowercase , "rb") as f:
_UpperCamelCase = f.read()
if isinstance(_lowercase , _lowercase):
_UpperCamelCase = ffmpeg_read(_lowercase , self.feature_extractor.sampling_rate)
if not isinstance(_lowercase , np.ndarray):
raise ValueError("We expect a numpy ndarray as input")
if len(audio.shape) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
_UpperCamelCase = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt")
_UpperCamelCase = candidate_labels
_UpperCamelCase = [hypothesis_template.format(_lowercase) for x in candidate_labels]
_UpperCamelCase = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase)
_UpperCamelCase = [text_inputs]
return inputs
def __UpperCAmelCase ( self : Dict , lowercase_ : Dict) -> Any:
"""simple docstring"""
_UpperCamelCase = model_inputs.pop("candidate_labels")
_UpperCamelCase = model_inputs.pop("text_inputs")
if isinstance(text_inputs[0] , _lowercase):
_UpperCamelCase = text_inputs[0]
else:
# Batching case.
_UpperCamelCase = text_inputs[0][0]
_UpperCamelCase = self.model(**_lowercase , **_lowercase)
_UpperCamelCase = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs.pop("candidate_labels")
_UpperCamelCase = model_outputs["logits"][0]
if self.framework == "pt":
_UpperCamelCase = logits.softmax(dim=0)
_UpperCamelCase = probs.tolist()
else:
raise ValueError("`tf` framework not supported.")
_UpperCamelCase = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(_lowercase , _lowercase) , key=lambda lowercase_: -x[0])
]
return result
| 702 | lowerCamelCase__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_UpperCamelCase = Stack()
_UpperCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__ ) )
elif i in operators:
# RULE 2
operator_stack.push(a__ )
elif i == ")":
# RULE 4
_UpperCamelCase = operator_stack.peek()
operator_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operand_stack.peek()
operand_stack.pop()
_UpperCamelCase = operators[opr](a__ , a__ )
operand_stack.push(a__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 82 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build ``(old_key, new_key)`` rename pairs mapping original ViLT
    checkpoint names to HuggingFace ``ViltModel`` names.

    Args:
        config: model config; only ``num_hidden_layers`` is read.
        vqa_model / nlvr_model / irtr_model: which fine-tuned head (if any)
            to map; VQA and NLVR2 add classifier-head renames.

    Fix: the original signature repeated ``a__`` four times, which is a
    SyntaxError; parameters renamed to match the body and the call site.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append(
            (f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias'))
    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ])
    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ])
    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ])
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ])
    else:
        pass
    return rename_keys
def lowerCAmelCase__(state_dict, config):
    """Split each fused timm ``qkv`` projection into separate HF
    query/key/value tensors, mutating *state_dict* in place.

    The fused weight has shape ``(3*hidden, hidden)`` and the bias
    ``(3*hidden,)``; rows are ordered query, key, value.

    Fixes: the original signature repeated ``a__`` (SyntaxError) and every
    destination assignment was collapsed to a throwaway local. Destination
    key names follow the HF ViLT layout
    (``vilt.encoder.layer.{i}.attention.attention.*``) — confirm against the
    upstream conversion script.
    """
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f'encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__(state_dict):
    """Remove the timm classification-head weights from *state_dict* in place.

    Missing keys are ignored (``pop`` with a default). Fixes the undefined
    parameter/argument names of the original.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCAmelCase__(dct, old, new):
    """Rename key *old* to *new* in *dct*, in place (KeyError if absent).

    Fixes: the original signature repeated ``a__`` (SyntaxError) and the
    write-back target was destroyed, so the value was silently dropped.
    """
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def lowerCAmelCase__(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViLT checkpoint, convert it to a HuggingFace
    model, sanity-check the outputs, and save model + processor to
    *pytorch_dump_folder_path*.

    NOTE(review): this file's helper definitions were renamed by the
    obfuscation to ``lowerCAmelCase__``; the calls below to
    ``create_rename_keys`` / ``rename_key`` / ``read_in_q_k_v`` assume the
    helpers keep their conventional names — confirm before running.
    Reconstructed identifiers (params, locals, ``id2label``) are inferred
    from the surrounding code.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # the MLM head decoder bias is re-tied by HF, so it is legitimately missing
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        # NOTE(review): kwarg name ``pixel_values_2`` inferred from the mangled
        # ``pixel_values_a`` — verify against ViltForImagesAndTextClassification.forward
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30_522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # (a duplicated assert indexing the 2-D VQA logits with three indices
        #  was removed: it could only raise IndexError)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model and processor to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # Fix: the original called the undefined name ``convert_vilt_checkpoint``;
    # the conversion entry point defined above is named ``lowerCAmelCase__``.
    lowerCAmelCase__(args.checkpoint_url, args.pytorch_dump_folder_path)
| 703 | import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for the BertAbs abstractive-summarization model.

    All constructor arguments are stored as attributes of the same name.

    Fixes: the base class name was undefined (``lowerCAmelCase``) — restored
    to the imported ``PretrainedConfig``; ``model_type`` restored from the
    meaningless ``__A`` attribute; every ``self.`` assignment target had been
    destroyed, so the config stored nothing.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 82 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the RoFormer sub-package. Fixes: the structure
# dict and the optional-backend lists were all assigned to the same mangled
# name (each overwrote the previous), and the final _LazyModule call
# referenced an undefined ``_import_structure``.
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
    from .tokenization_roformer import RoFormerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roformer_fast import RoFormerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roformer import (
            ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoFormerForCausalLM,
            RoFormerForMaskedLM,
            RoFormerForMultipleChoice,
            RoFormerForQuestionAnswering,
            RoFormerForSequenceClassification,
            RoFormerForTokenClassification,
            RoFormerLayer,
            RoFormerModel,
            RoFormerPreTrainedModel,
            load_tf_weights_in_roformer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roformer import (
            TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerLayer,
            TFRoFormerModel,
            TFRoFormerPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roformer import (
            FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerModel,
            FlaxRoFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__(config, base_model=False):
    """Build ``(old_key, new_key)`` rename pairs mapping timm DeiT checkpoint
    names to HuggingFace ``DeiT*`` names.

    Args:
        config: model config; only ``num_hidden_layers`` is read.
        base_model: if True, map to a headless base model (strips the
            ``deit`` prefix and maps the layernorm/pooler instead of heads).

    Fix: the original signature repeated ``a__`` twice, a SyntaxError;
    parameters renamed to match the body.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias'))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        # NOTE(review): ``[4:]`` leaves a leading "." for keys prefixed
        # "deit." (5 chars); preserved as-is — confirm intended slice length.
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ])
    return rename_keys
def lowerCAmelCase__(state_dict, config, base_model=False):
    """Split each fused timm ``qkv`` projection into separate HF
    query/key/value tensors, mutating *state_dict* in place.

    Fixes: the original signature repeated ``a__`` (SyntaxError) and the
    destination assignments were collapsed to a throwaway local. Destination
    keys follow the HF layout ``{prefix}encoder.layer.{i}.attention.attention.*``
    — confirm against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__(dct, old, new):
    """Rename key *old* to *new* in *dct*, in place (KeyError if absent).

    Fixes the duplicated-parameter SyntaxError and the destroyed write-back
    target of the original.
    """
    val = dct.pop(old)
    dct[new] = val
def lowerCAmelCase__():
    """Download the standard COCO test image (two cats) and return it as a
    PIL image. Fixes the undefined ``_lowerCamelCase`` names: the URL local
    had lost its name and ``stream=True`` had been mangled.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase__(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint named *deit_name* to a HuggingFace
    ``DeiTForImageClassificationWithTeacher`` and save model + image
    processor to *pytorch_dump_folder_path*, verifying logits match timm.

    NOTE(review): the helper definitions in this file were renamed to
    ``lowerCAmelCase__`` by the obfuscation; the calls below to
    ``create_rename_keys`` / ``rename_key`` / ``read_in_q_k_v`` /
    ``prepare_img`` assume the conventional helper names — confirm before
    running. Locals and ``config`` attribute names are reconstructed.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {deit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # Fix: the original called the undefined name ``convert_deit_checkpoint``;
    # the conversion entry point defined above is named ``lowerCAmelCase__``.
    lowerCAmelCase__(args.deit_name, args.pytorch_dump_folder_path)
| 705 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoders/reader.

    Fixes: the base class name was undefined (``lowerCAmelCase``) — restored
    to the imported ``PretrainedConfig``; ``model_type`` restored from the
    meaningless ``__A``; all destroyed ``self.`` assignment targets restored
    so the configured values are actually stored.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim = 0 means no extra projection on top of the encoder
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 82 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for DistilBERT.

    Fixes: the base class name was undefined (``lowerCAmelCase``) — restored
    to the imported ``PretrainedConfig``; ``model_type`` and
    ``attribute_map`` restored from the duplicate mangled ``__A``
    attributes; destroyed ``self.`` assignment targets restored; the
    ``super().__init__`` call referenced the undefined ``__UpperCamelCase``.
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class SCREAMING_SNAKE_CASE(OnnxConfig):
    """ONNX export configuration for DistilBERT.

    Fixes: the base class name was undefined (``lowerCAmelCase``) — restored
    to the imported ``OnnxConfig``; the property name is restored to
    ``inputs`` (the ``OnnxConfig`` override point) and the destroyed
    ``dynamic_axis`` local is reinstated.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 706 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Table Transformer sub-package. Fixes: the
# structure dict and the torch-backend list were assigned to the same mangled
# name (the list overwrote the dict), and the final _LazyModule call
# referenced an undefined ``_import_structure``.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch is only imported on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 707 | import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def lowerCAmelCase__(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert a TensorFlow XLNet checkpoint to a PyTorch model and save the
    weights and config into *pytorch_dump_folder_path*.

    The model head is chosen from *finetuning_task*: a GLUE task name gives a
    sequence classifier, a SQuAD-like name a QA head, otherwise the LM head.

    Fixes: the original signature repeated ``a__`` four times (SyntaxError)
    and every assignment target/argument was mangled; parameter names follow
    the call site (checkpoint path, config file, dump folder, task).
    """
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'Building PyTorch XLNetForSequenceClassification model from configuration: {config}')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    # Fix: the original called the undefined name
    # ``convert_xlnet_checkpoint_to_pytorch``; the conversion entry point
    # defined above is named ``lowerCAmelCase__``.
    lowerCAmelCase__(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 82 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 network used as a fixture by the hook tests below.

    Fixes over the obfuscated original:
    - class renamed to ``ModelForTest`` (the test methods below instantiate it
      under that name);
    - ``nn.BatchNormad`` does not exist in ``torch.nn`` -> ``nn.BatchNorm1d``;
    - the second linear layer was rebinding ``self.lineara`` (shadowing the
      first) -> stored as ``self.linearb`` so both layers survive;
    - the forward method was not named ``forward``, so ``model(x)`` could
      never dispatch to it.
    """

    def __init__(self) -> None:
        super().__init__()
        self.lineara = nn.Linear(3 , 4)
        # BatchNorm over the 4 features produced by the first linear layer.
        self.batchnorm = nn.BatchNorm1d(4)
        self.linearb = nn.Linear(4 , 5)

    def forward(self, x):
        """Map a (batch, 3) input to a (batch, 5) output."""
        return self.linearb(self.batchnorm(self.lineara(x)))
class PreForwardHook(ModelHook):
    """Test hook that increments the first positional argument before forward.

    Fixes: the base class ``__A`` was undefined (``ModelHook`` is what this file
    imports from ``accelerate.hooks``); the method had three parameters all
    named ``lowercase_`` — a SyntaxError — while its body already used
    ``args``/``kwargs``. The method name follows the accelerate ``ModelHook``
    pre-forward API — confirm against the installed accelerate version.
    """

    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input by 1; pass everything else through.
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    """Test hook that adds 1 to the module output after forward.

    Fixes: undefined base ``__A`` -> ``ModelHook`` (imported from
    ``accelerate.hooks`` at the top of this file); the two parameters were both
    named ``lowercase_`` (SyntaxError) while the body used ``output``. The
    method name follows the accelerate ``ModelHook`` post-forward API — confirm
    against the installed accelerate version.
    """

    def post_forward(self, module, output):
        return output + 1
class _UpperCAmelCase ( unittest.TestCase ):
    """Unit tests for accelerate's module hooks: add/remove, hook chaining,
    pre/post-forward mutation, gradient control, multi-GPU device alignment,
    and CPU offload (with and without buffers / an explicit weights_map).

    NOTE(review): this class is damaged by obfuscation and cannot run as
    written — all distinct locals were collapsed into the single assignment
    target `_UpperCamelCase`, and the names actually read afterwards
    (`test_model`, `x`, `outputa`, `hook_kwargs`, `UpperCamelCase__`, the
    fixture classes `ModelForTest`/`PreForwardHook`/`PostForwardHook`) are
    never bound under those spellings here. Restoring it requires the original
    accelerate test source; the code below is kept byte-identical.
    """

    def __UpperCAmelCase ( self : Dict) -> Any:
        """Adding a hook sets _hf_hook/_old_forward without changing the forward
        name or signature; removing the hook restores the module."""
        _UpperCamelCase = ModelForTest()
        _UpperCamelCase = ModelHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        self.assertEqual(test_model._hf_hook , UpperCamelCase__)
        self.assertTrue(hasattr(UpperCamelCase__ , "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["x"])
        remove_hook_from_module(UpperCamelCase__)
        self.assertFalse(hasattr(UpperCamelCase__ , "_hf_hook"))
        self.assertFalse(hasattr(UpperCamelCase__ , "_old_forward"))

    def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
        """Appending a second hook wraps both in a SequentialHook with two entries."""
        _UpperCamelCase = ModelForTest()
        _UpperCamelCase = ModelHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__ , append=UpperCamelCase__)
        self.assertEqual(isinstance(test_model._hf_hook , UpperCamelCase__) , UpperCamelCase__)
        self.assertEqual(len(test_model._hf_hook.hooks) , 2)
        self.assertTrue(hasattr(UpperCamelCase__ , "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ["x"])
        remove_hook_from_module(UpperCamelCase__)
        self.assertFalse(hasattr(UpperCamelCase__ , "_hf_hook"))
        self.assertFalse(hasattr(UpperCamelCase__ , "_old_forward"))

    def __UpperCAmelCase ( self : Tuple) -> Any:
        """A PreForwardHook shifts the input; re-adding replaces the hook rather
        than chaining; SequentialHook is needed to chain two hooks."""
        _UpperCamelCase = ModelForTest()
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = test_model(x + 1)
        _UpperCamelCase = test_model(x + 2)
        _UpperCamelCase = PreForwardHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        _UpperCamelCase = PreForwardHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        _UpperCamelCase = SequentialHook(PreForwardHook() , PreForwardHook())
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5)

    def __UpperCAmelCase ( self : Optional[int]) -> Any:
        """A PostForwardHook adds 1 to the output; replacement vs. chaining
        mirrors the pre-forward test above."""
        _UpperCamelCase = ModelForTest()
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = test_model(UpperCamelCase__)
        _UpperCamelCase = PostForwardHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertTrue(torch.allclose(UpperCamelCase__ , output + 1 , atol=1e-5))
        # Attaching a hook to a model when it already has one replaces, does not chain
        _UpperCamelCase = PostForwardHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertTrue(torch.allclose(UpperCamelCase__ , output + 1 , atol=1e-5))
        # You need to use the sequential hook to chain two or more hooks
        _UpperCamelCase = SequentialHook(PostForwardHook() , PostForwardHook())
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        assert torch.allclose(UpperCamelCase__ , output + 2 , atol=1e-5)

    def __UpperCAmelCase ( self : Any) -> str:
        """Output keeps requires_grad by default; after enabling the hook's
        no-grad mode the output no longer requires grad."""
        _UpperCamelCase = ModelForTest()
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = test_model(UpperCamelCase__)
        _UpperCamelCase = PostForwardHook()
        add_hook_to_module(UpperCamelCase__ , UpperCamelCase__)
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertTrue(torch.allclose(UpperCamelCase__ , output + 1))
        self.assertTrue(outputa.requires_grad)
        _UpperCamelCase = True
        _UpperCamelCase = test_model(UpperCamelCase__)
        self.assertFalse(outputa.requires_grad)

    @require_multi_gpu
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """AlignDevicesHook moves submodules to different GPUs; io_same_device
        puts the output back on the input's device."""
        _UpperCamelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
        self.assertEqual(model.lineara.weight.device , torch.device(0))
        self.assertEqual(model.batchnorm.weight.device , torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
        self.assertEqual(model.lineara.weight.device , torch.device(1))
        # We can still make a forward pass. The input does not need to be on any particular device
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , torch.device(1))
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(UpperCamelCase__ , AlignDevicesHook(io_same_device=UpperCamelCase__))
        _UpperCamelCase = torch.randn(2 , 3).to(0)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , torch.device(0))

    def __UpperCAmelCase ( self : Any) -> Dict:
        """CPU offload via per-module AlignDevicesHook: parameters go to the
        meta device (buffers stay unless offload_buffers=True); removing the
        hooks reloads the weights."""
        _UpperCamelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # This will move each submodule on different devices
        _UpperCamelCase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase__))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCamelCase__))
        add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase__))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCamelCase = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase__)
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # Now test with buffers included in the offload
        _UpperCamelCase = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase__))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCamelCase__))
        add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCamelCase__))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta"))
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))

    def __UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]:
        """Same offload scenario via attach_align_device_hook on the whole
        model; remove_hook_from_submodules restores the weights."""
        _UpperCamelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # This will move each submodule on different devices
        _UpperCamelCase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(UpperCamelCase__ , execution_device=UpperCamelCase__ , offload=UpperCamelCase__)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCamelCase = torch.device(UpperCamelCase__)
        self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase__)
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(UpperCamelCase__)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(UpperCamelCase__ , execution_device=UpperCamelCase__ , offload=UpperCamelCase__ , offload_buffers=UpperCamelCase__)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta"))
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(UpperCamelCase__)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))

    def __UpperCAmelCase ( self : Optional[Any]) -> str:
        """Offload with an explicit weights_map (model.state_dict()); otherwise
        identical to the attach_align_device_hook test above."""
        _UpperCamelCase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # This will move each submodule on different devices
        _UpperCamelCase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            UpperCamelCase__ , execution_device=UpperCamelCase__ , offload=UpperCamelCase__ , weights_map=model.state_dict())
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        _UpperCamelCase = torch.device(UpperCamelCase__)
        self.assertEqual(model.batchnorm.running_mean.device , UpperCamelCase__)
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(UpperCamelCase__)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            UpperCamelCase__ , execution_device=UpperCamelCase__ , offload=UpperCamelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCamelCase__ , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("meta"))
        self.assertEqual(model.lineara.weight.device , torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta"))
        _UpperCamelCase = torch.randn(2 , 3)
        _UpperCamelCase = model(UpperCamelCase__)
        self.assertEqual(output.device , UpperCamelCase__)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(UpperCamelCase__)
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device , torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device , torch.device("cpu"))
| 708 | import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal LightningModule wrapping a LongformerModel plus a linear QA head.

    Only used as a container whose `state_dict` layout matches the Lightning
    checkpoint; the conversion function below reads `self.model` and
    `self.qa_outputs` (both names are required by those call sites). The class
    name is restored from its use in the conversion function.
    """

    def __init__(self, model) -> None:
        super().__init__()
        self.model = model            # the wrapped LongformerModel backbone
        self.num_labels = 2           # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels)

    def forward(self):
        """Intentionally a no-op: only the weights are needed for conversion."""
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """Convert a PyTorch-Lightning Longformer QA checkpoint into a HF
    `LongformerForQuestionAnswering` saved under `pytorch_dump_folder_path`.

    The obfuscated original declared three parameters all named ``a__`` (a
    SyntaxError) and dropped the local bindings; names are restored from the
    call site in ``__main__`` and the uses inside this body.
    """
    # load longformer model from model identifier
    model = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(model)
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    # CLI entry point; `parser`/`args` bindings restored (the obfuscated dump
    # assigned them to `lowerCamelCase__` while using `parser`/`args` below).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--longformer_model''',
        default=None,
        type=str,
        required=True,
        help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
    )
    parser.add_argument(
        '''--longformer_question_answering_ckpt_path''',
        default=None,
        type=str,
        required=True,
        help='''Path the official PyTorch Lightning Checkpoint.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 82 | 0 |
def factorial(digit: int) -> int:
    """Return digit! (recursive); 0! and 1! are 1.

    Name restored from the recursive call in the original body and its use in
    ``krishnamurthy`` below.
    """
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))


def krishnamurthy(number: int) -> bool:
    """Return True iff `number` equals the sum of the factorials of its digits
    (e.g. 145 = 1! + 4! + 5!). Name restored from its use in ``__main__``.
    """
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        # Peel off the least-significant digit and accumulate its factorial.
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
    # Interactive entry point; `number` binding restored (the f-string below
    # reads it, but the obfuscated dump assigned the input to `lowerCamelCase__`).
    print('''Program to check whether a number is a Krisnamurthy Number or not.''')
    number = int(input('''Enter number: ''').strip())
    print(
        F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
| 709 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
# Module-level logger (transformers convention is to bind it as `logger`).
logger = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias kept for backward compatibility; warns and defers to
    LayoutLMvaImageProcessor (imported at the top of this file).

    Fixes: the base class was the undefined name ``lowerCAmelCase``, and the
    second argument to ``warnings.warn`` was the ``*args`` tuple instead of a
    warning category (deprecation notices use ``FutureWarning``).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." ,
            FutureWarning , )
        super().__init__(*args , **kwargs)
| 82 | 0 |
def lowerCAmelCase__ ( a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = 0
while b > 0:
if b & 1:
_UpperCamelCase = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 710 | import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
# Module-level logger; the pipeline classes below call `logger.warning(...)`,
# so the binding must be named `logger`.
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    """Output format selector for the text2text pipelines below.

    Fixes: both members were named ``__A`` (Enum rejects duplicate keys) and the
    class name is restored from its uses (``ReturnType.TENSORS`` /
    ``ReturnType.TEXT``) in the pipeline postprocess code.
    """

    TENSORS = 0  # return raw generated token ids
    TEXT = 1     # return decoded text
@add_end_docstrings(lowerCAmelCase )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Base text2text generation pipeline: tokenize -> model.generate -> decode.

    NOTE(review): damaged by obfuscation and not runnable as written — the
    decorator argument / base class `lowerCAmelCase` is undefined (presumably
    PIPELINE_INIT_ARGS / Pipeline from the `.base` import above — confirm),
    several methods declare multiple parameters all named `lowercase_`
    (SyntaxError), and locals were collapsed into `_UpperCamelCase` while the
    code reads the original names (`truncation`, `generate_kwargs`,
    `stop_sequence_ids`, `prefix`, `inputs`, `result`, `records`, ...).
    Kept byte-identical pending the original source.
    """

    __A = '''generated'''

    def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : Tuple) -> List[Any]:
        """Initialize and restrict to the seq2seq LM model classes of the active framework."""
        super().__init__(*lowercase_ , **lowercase_)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , lowercase_ : Any=None , lowercase_ : Union[str, Any]=None , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
        """Split caller kwargs into (preprocess, forward, postprocess) parameter dicts."""
        _UpperCamelCase = {}
        if truncation is not None:
            _UpperCamelCase = truncation
        _UpperCamelCase = generate_kwargs
        _UpperCamelCase = {}
        if return_tensors is not None and return_type is None:
            _UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            _UpperCamelCase = return_type
        if clean_up_tokenization_spaces is not None:
            _UpperCamelCase = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only the first token of a multi-token stop sequence is honoured.
            _UpperCamelCase = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
            if len(lowercase_) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            _UpperCamelCase = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def __UpperCAmelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int) -> Any:
        """Input-length sanity check hook; the base implementation accepts everything."""
        return True

    def __UpperCAmelCase ( self : Dict , *lowercase_ : List[str] , lowercase_ : List[Any]) -> Tuple:
        """Prefix and tokenize a string or list of strings into model inputs."""
        _UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0] , lowercase_):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            _UpperCamelCase = ([prefix + arg for arg in args[0]],)
            _UpperCamelCase = True
        elif isinstance(args[0] , lowercase_):
            _UpperCamelCase = (prefix + args[0],)
            _UpperCamelCase = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`')
        _UpperCamelCase = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__( self : List[Any] , *lowercase_ : Any , **lowercase_ : int) -> Dict:
        """Run the pipeline; unwrap single-result lists for list-of-strings input."""
        _UpperCamelCase = super().__call__(*lowercase_ , **lowercase_)
        if (
            isinstance(args[0] , lowercase_)
            and all(isinstance(lowercase_ , lowercase_) for el in args[0])
            and all(len(lowercase_) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def __UpperCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : str=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : Dict) -> Optional[int]:
        """Preprocess step: tokenize the raw input with the chosen truncation strategy."""
        _UpperCamelCase = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_)
        return inputs

    def __UpperCAmelCase ( self : str , lowercase_ : str , **lowercase_ : str) -> str:
        """Forward step: validate lengths, call generate, reshape ids to (batch, candidates, seq)."""
        if self.framework == "pt":
            _UpperCamelCase , _UpperCamelCase = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            _UpperCamelCase , _UpperCamelCase = tf.shape(model_inputs["input_ids"]).numpy()
        _UpperCamelCase = generate_kwargs.get("min_length" , self.model.config.min_length)
        _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
        self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"])
        _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
        _UpperCamelCase = output_ids.shape[0]
        if self.framework == "pt":
            _UpperCamelCase = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:])
        elif self.framework == "tf":
            _UpperCamelCase = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def __UpperCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : int=ReturnType.TEXT , lowercase_ : int=False) -> Tuple:
        """Postprocess step: emit token ids or decoded text per ReturnType."""
        _UpperCamelCase = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                _UpperCamelCase = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                _UpperCamelCase = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
                }
            records.append(lowercase_)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Summarization pipeline: text2text generation whose outputs are labelled
    `summary_*` and whose length check warns when the requested max_length
    exceeds the input length.

    Fixes: decorator argument restored to PIPELINE_INIT_ARGS (imported from
    `.base` above), `__call__`/`check_inputs` had duplicate parameter names
    (SyntaxError), and the class attribute read as `self.return_name` by the
    base class's postprocess is restored. NOTE(review): the base class
    `lowerCAmelCase` is still undefined here — it should be the
    Text2TextGenerationPipeline class defined above (its name is mangled in
    this file); confirm against the original source.
    """

    return_name = '''summary'''

    def __call__(self, *args, **kwargs):
        """Delegate to the text2text pipeline __call__."""
        return super().__call__(*args , **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn about inconsistent length settings (name/order restored from the
        `self.check_inputs(...)` call in the base class's forward step)."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCAmelCase ( lowerCAmelCase ):
    """Translation pipeline: text2text generation with src/tgt language
    handling and `translation_*` output labels.

    Fixes: decorator argument restored to PIPELINE_INIT_ARGS; every method had
    duplicate `lowercase_` parameter names (SyntaxErrors) while the bodies
    already read the real names (`input_length`, `truncation`, `src_lang`,
    `tgt_lang`, `items`, `task`), which ground the restored signatures; method
    names restored to the pipeline API (`check_inputs`, `preprocess`,
    `_sanitize_parameters`) implied by the `super()._parse_and_tokenize` /
    `super()._sanitize_parameters` calls. NOTE(review): the base class
    `lowerCAmelCase` should be the Text2TextGenerationPipeline class defined
    above (name mangled in this file); confirm against the original source.
    """

    return_name = '''translation'''

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Warn when the input occupies more than 90% of max_length."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        """Build translation inputs with the tokenizer when it supports
        `_build_translation_inputs`, else fall back to plain tokenization."""
        if getattr(self.tokenizer , "_build_translation_inputs" , None):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        """Extend the base parameter split with src/tgt languages, falling back
        to parsing them out of a `translation_XX_to_YY` task name."""
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task" , self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        """Delegate to the text2text pipeline __call__."""
        return super().__call__(*args , **kwargs)
| 82 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    # Typing-only imports backing the string annotations below
    # ("sqlite3.Connection", "sqlalchemy...."); `sqlitea` is not a real module.
    import sqlite3
    import sqlalchemy
class _UpperCAmelCase ( AbstractDatasetInputStream ):
    """Read a `Dataset` from a SQL query or table via the `Sql` packaged module.

    Fixes: base class restored from the `AbstractDatasetInputStream` import
    above (was the undefined `__UpperCAmelCase`); `__init__` declared several
    parameters all named `lowercase_` (SyntaxError) — names restored from the
    keyword arguments the body forwards to `super().__init__` and `Sql(...)`.
    """

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs)
        # The Sql builder performs the actual reading; it is prepared here and run in read().
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def read(self):
        """Prepare the builder and return the resulting 'train' split.

        NOTE(review): method name follows the AbstractDatasetInputStream
        interface — confirm against the installed datasets version.
        """
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train" , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class _UpperCAmelCase :
    """Write a `Dataset` to a SQL table via pandas `DataFrame.to_sql`,
    optionally batched across a multiprocessing pool.

    Fixes: `__init__` declared all parameters as `lowercase_` (SyntaxError) and
    never bound the attributes the methods read; names are restored from the
    attribute reads (`self.dataset`, `self.name`, `self.con`, `self.batch_size`,
    `self.num_proc`, `self.to_sql_kwargs`). `_write`/`_batch_sql` names are
    grounded by the `self._write(...)` / `self._batch_sql(...)` calls already
    present in the bodies; `write` follows the datasets writer convention —
    confirm against the installed datasets version.
    """

    def __init__(
        self,
        dataset: "Dataset",
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.name = name                       # target SQL table name
        self.con = con                         # connection / URI passed to DataFrame.to_sql
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Strip reader-only kwargs and write all batches; returns rows written."""
        _ = self.to_sql_kwargs.pop("sql" , None)
        _ = self.to_sql_kwargs.pop("con" , None)
        index = self.to_sql_kwargs.pop("index" , False)
        written = self._write(index=index , **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch (offset, index, to_sql_kwargs); appends after the first batch."""
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Write every batch, sequentially or through a multiprocessing pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows , batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                    written += num_rows
        return written
| 711 | import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
# Module-level logger; the tokenizer class below calls `logger.warning_once(...)`,
# so the binding must be named `logger`.
logger = logging.get_logger(__name__)
# Constant names restored from the class attributes of the tokenizer below
# (VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES); the obfuscated dump bound all of
# them to `lowerCamelCase__`, leaving those references undefined.
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
        '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
        '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
        '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
        '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''t5-small''': 512,
    '''t5-base''': 512,
    '''t5-large''': 512,
    '''t5-3b''': 512,
    '''t5-11b''': 512,
}

# SentencePiece word-boundary marker; name follows the transformers
# sentencepiece tokenizers' convention — confirm against the original file.
SPIECE_UNDERLINE = '''▁'''
class _UpperCAmelCase ( lowerCAmelCase ):
    '''
    SentencePiece-based T5 tokenizer.

    Wraps a ``sentencepiece`` processor and augments it with ``extra_ids``
    sentinel tokens (``<extra_id_0>`` ... ``<extra_id_{extra_ids-1}>``) that T5
    uses for span corruption; sentinel ids occupy the top of the vocabulary.

    NOTE(review): this file looks machine-renamed and is not currently
    runnable — several signatures repeat the parameter name ``lowercase_``
    (a SyntaxError), many assignments bind a throwaway local
    ``_UpperCamelCase`` where an attribute such as ``self.vocab_file`` was
    presumably intended, and free names such as ``extra_tokens``, ``vocab``,
    ``state`` and ``d`` are read without being bound. Verify every method
    against the upstream ``T5Tokenizer`` implementation before relying on it.
    '''
    __A = VOCAB_FILES_NAMES
    __A = PRETRAINED_VOCAB_FILES_MAP
    __A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Model input names this tokenizer produces.
    __A = ['''input_ids''', '''attention_mask''']
    def __init__( self : Tuple , lowercase_ : int , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Dict="<pad>" , lowercase_ : Tuple=100 , lowercase_ : str=None , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : str=True , **lowercase_ : Optional[Any] , ) -> None:
        """Load the SentencePiece model and register sentinel/special tokens.

        NOTE(review): the repeated ``lowercase_`` parameter name and the free
        names ``extra_ids`` / ``additional_special_tokens`` / ``extra_tokens``
        / ``legacy`` / ``sp_model_kwargs`` below indicate a broken automated
        rename — confirm against upstream before use.
        """
        if extra_ids > 0 and additional_special_tokens is None:
            # Generate the default sentinel tokens when none were supplied.
            _UpperCamelCase = [f'<extra_id_{i}>' for i in range(lowercase_)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            _UpperCamelCase = len(set(filter(lambda lowercase_: bool("extra_id" in str(lowercase_)) , lowercase_)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")
        if legacy:
            # Legacy tokenization mishandles tokens immediately after specials.
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565")
        _UpperCamelCase = legacy
        _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , extra_ids=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase_ , **lowercase_ , )
        _UpperCamelCase = vocab_file
        _UpperCamelCase = extra_ids
        # Instantiate and load the SentencePiece processor.
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(lowercase_)
    @staticmethod
    def __UpperCAmelCase ( lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str) -> Any:
        """Resolve the effective ``model_max_length`` for a known checkpoint.

        Warns (deprecation scheduled for Transformers v5) when the tokenizer
        was instantiated with the hard-coded positional-embedding size instead
        of an explicit ``model_max_length``.
        """
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            _UpperCamelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowercase_ , )
        return max_model_length
    @property
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Total vocabulary size: SentencePiece pieces plus the extra sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids
    def __UpperCAmelCase ( self : Dict) -> Optional[int]:
        """Return the full token -> id mapping, including user-added tokens.

        NOTE(review): ``vocab`` is read below but never bound here — presumably
        the dict comprehension's result; verify.
        """
        _UpperCamelCase = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __UpperCAmelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere.

        T5 appends a single EOS to each sequence, hence the trailing ``[1]``.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(lowercase_)) + [1]
        return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
    def __UpperCAmelCase ( self : str) -> Dict:
        """Return the sentinel (<extra_id_N>) tokens present in additional_special_tokens."""
        return list(
            set(filter(lambda lowercase_: bool(re.search(R"<extra_id_\d+>" , lowercase_)) is not None , self.additional_special_tokens)))
    def __UpperCAmelCase ( self : List[Any]) -> Dict:
        """Return the ids of the sentinel tokens."""
        return [self._convert_token_to_id(lowercase_) for token in self.get_sentinel_tokens()]
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : List[int]) -> List[int]:
        """Append the EOS id unless the sequence already ends with it (warns then)."""
        if len(lowercase_) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def __UpperCAmelCase ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Build token-type ids — all zeros, since T5 does not use them."""
        _UpperCamelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]
    def __UpperCAmelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None) -> List[int]:
        """Join one or two sequences, ensuring each ends with a single EOS."""
        _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
        if token_ids_a is None:
            return token_ids_a
        else:
            _UpperCamelCase = self._add_eos_if_not_present(lowercase_)
            return token_ids_a + token_ids_a
    def __getstate__( self : Tuple) -> Any:
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        _UpperCamelCase = self.__dict__.copy()
        _UpperCamelCase = None
        return state
    def __setstate__( self : Optional[Any] , lowercase_ : Any) -> Optional[int]:
        """Restore pickled state and re-load the SentencePiece model from vocab_file."""
        _UpperCamelCase = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            _UpperCamelCase = {}
        _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def __UpperCAmelCase ( self : int , lowercase_ : "TextInput" , **lowercase_ : Optional[int]) -> List[str]:
        """Tokenize text; non-legacy mode prefixes SPIECE_UNDERLINE so leading
        whitespace is handled consistently after special tokens."""
        if not self.legacy:
            _UpperCamelCase = SPIECE_UNDERLINE + text.replace(lowercase_ , " ")
        return super().tokenize(lowercase_ , **lowercase_)
    def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : int , **lowercase_ : Optional[int]) -> List[str]:
        """Run SentencePiece encoding, undoing the artificial non-legacy prefix."""
        if not self.legacy:
            _UpperCamelCase = text.startswith(lowercase_)
            if is_first:
                _UpperCamelCase = text[1:]
        _UpperCamelCase = self.sp_model.encode(lowercase_ , out_type=lowercase_)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(lowercase_):
            # Strip the leading underline that tokenize() prepended above.
            _UpperCamelCase = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
        """Map a token string to its id; sentinel tokens occupy the top of the vocab."""
        if token.startswith("<extra_id_"):
            _UpperCamelCase = re.match(R"<extra_id_(\d+)>" , lowercase_)
            _UpperCamelCase = int(match.group(1))
            # Sentinels are numbered downward from the end of the vocabulary.
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(lowercase_)
    def __UpperCAmelCase ( self : List[Any] , lowercase_ : Any) -> int:
        """Map an id back to its token string (sentinel ids live above the SP vocab)."""
        if index < self.sp_model.get_piece_size():
            _UpperCamelCase = self.sp_model.IdToPiece(lowercase_)
        else:
            _UpperCamelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token
    def __UpperCAmelCase ( self : Dict , lowercase_ : Optional[int]) -> Optional[Any]:
        """Decode a token sequence to text, keeping special tokens out of the
        SentencePiece decoder and joining them with single spaces."""
        _UpperCamelCase = []
        _UpperCamelCase = ""
        _UpperCamelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowercase_) + token
                _UpperCamelCase = True
                _UpperCamelCase = []
            else:
                current_sub_tokens.append(lowercase_)
                _UpperCamelCase = False
        out_string += self.sp_model.decode(lowercase_)
        return out_string.strip()
    def __UpperCAmelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model into save_directory; return the written path.

        Copies the original model file when it exists, otherwise serializes the
        in-memory model protobuf.
        """
        if not os.path.isdir(lowercase_):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        _UpperCamelCase = os.path.join(
            lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , lowercase_)
        elif not os.path.isfile(self.vocab_file):
            with open(lowercase_ , "wb") as fi:
                _UpperCamelCase = self.sp_model.serialized_model_proto()
                fi.write(lowercase_)
        return (out_vocab_file,)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger.
# NOTE(review): the pretrained-config map below rebinds the same name as the
# logger — an automated rename appears to have collapsed distinct identifiers;
# verify against the upstream VAN configuration module.
lowerCamelCase__ = logging.get_logger(__name__)
# Map from Hub model id to the URL of its configuration file.
lowerCamelCase__ = {
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}
class _UpperCAmelCase ( _UpperCamelCase ):
    '''
    Configuration for a VAN (Visual Attention Network) model.

    Stores per-stage architecture hyper-parameters (patch sizes, strides,
    hidden sizes, depths, MLP ratios) plus activation/initialization settings,
    and forwards all remaining keyword arguments to the base config class.

    Bug fixes relative to the previous revision:
    * ``__init__`` repeated the parameter name ``lowercase_`` for every
      argument — a SyntaxError; parameter names are restored from the
      right-hand sides of the original assignments.
    * ``super().__init__(**_UpperCAmelCase)`` tried to unpack the class object
      itself; it now unpacks the captured ``**kwargs``.
    * every assignment bound a throwaway local ``_UpperCamelCase`` instead of
      setting an instance attribute, leaving the config object empty.
    '''
    __A = '''van'''
    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-6 , layer_scale_init_value=1e-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ) -> None:
        """Create a VAN configuration.

        NOTE: the list defaults are shared mutable objects (kept for
        compatibility with the original positional signature); callers must
        not mutate them in place.
        """
        super().__init__(**kwargs)
        self.image_size = image_size                      # input resolution
        self.num_channels = num_channels                  # e.g. 3 for RGB
        self.patch_sizes = patch_sizes                    # patch-embedding kernel per stage
        self.strides = strides                            # patch-embedding stride per stage
        self.hidden_sizes = hidden_sizes                  # channel width per stage
        self.depths = depths                              # number of blocks per stage
        self.mlp_ratios = mlp_ratios                      # MLP expansion ratio per stage
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( a__ ) ->tuple:
    '''Return an ``(operation, key)`` pair describing a ``__getitem__`` access
    of key ``a__``, for replay by the operation runner.

    Bug fix: the body returned the undefined name ``k`` instead of the
    parameter, raising ``NameError`` on every call.
    '''
    return getitem, a__
def lowerCAmelCase__ ( a__ , A__ ) ->tuple:
    '''Return an ``(operation, key, value)`` triple describing ``d[key] = value``,
    for replay by the operation runner.

    Bug fixes: both parameters were named ``a__`` (a SyntaxError), and the
    body returned the undefined names ``k``/``v``.
    '''
    return setitem, a__, A__
def lowerCAmelCase__ ( a__ ) ->tuple:
    '''Return an ``(operation, key)`` pair describing deletion of key ``a__``,
    for replay by the operation runner.

    Bug fix: the body returned the undefined name ``k`` instead of the
    parameter, raising ``NameError`` on every call.
    '''
    return delitem, a__
def lowerCAmelCase__ ( a__ , A__ , *args ) ->tuple:
    '''Apply operation ``A__`` to mapping ``a__`` with positional ``args``.

    Returns ``(result, None)`` on success or ``(None, exception)`` on failure,
    so callers can compare the outcome of the same operation on two mappings
    without either one aborting the test.

    Bug fixes: all three parameters shared the single name ``a__`` (a
    SyntaxError), and the body called the undefined name ``fun``.
    '''
    try:
        return A__(a__ , *args), None
    except Exception as e:
        # Capture and return the exception instead of propagating it.
        return None, e
# Operation scripts replayed against both HashMap and dict by the parametrized
# test below.
# NOTE(review): every constant rebinds the same name ``lowerCamelCase__`` and
# the helpers are called as ``_set`` / ``_get`` / ``_del``, which are not bound
# under those names in this chunk — an automated rename appears to have
# collapsed ``_add_items``, ``_overwrite_items``, ``_delete_items``,
# ``_access_absent_items``, ``_add_with_resize_up`` and
# ``_add_with_resize_down``; verify against the upstream test module.
# Insert two distinct keys.
lowerCamelCase__ = (
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
)
# Insert the same key twice (second write overwrites the first).
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_a''', '''val_b'''),
]
# Interleave inserts and deletes, including re-inserting a deleted key.
lowerCamelCase__ = [
    _set('''key_a''', '''val_a'''),
    _set('''key_b''', '''val_b'''),
    _del('''key_a'''),
    _del('''key_b'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
]
# Access and delete keys that are (mostly) absent — error paths must match dict.
lowerCamelCase__ = [
    _get('''key_a'''),
    _del('''key_a'''),
    _set('''key_a''', '''val_a'''),
    _del('''key_a'''),
    _del('''key_a'''),
    _get('''key_a'''),
]
# Enough inserts to force the hash map to grow past its initial block size.
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
# Grow, then empty the map to trigger a downsize, then insert again.
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
    "operations" , (
        pytest.param(_add_items , id="add items" ),
        pytest.param(_overwrite_items , id="overwrite items" ),
        pytest.param(_delete_items , id="delete items" ),
        pytest.param(_access_absent_items , id="access absent items" ),
        pytest.param(_add_with_resize_up , id="add with resize up" ),
        pytest.param(_add_with_resize_down , id="add with resize down" ),
    ) , )
def lowerCAmelCase__ ( a__ ) ->Dict:
    '''Replay one operation script against both ``HashMap`` and a plain ``dict``
    and assert that results, errors, string form, key set, length and item set
    all agree after every step.

    NOTE(review): the body reads names that are never bound here
    (``my_res``/``py_res``, ``my``/``py``) and repeatedly passes ``a__`` where
    the per-step mapping/operation were presumably intended — the throwaway
    ``_UpperCamelCase`` targets look like a broken automated rename; verify
    against the upstream test before trusting this block.
    '''
    # System under test: small initial block size so resize paths are exercised.
    _UpperCamelCase = HashMap(initial_block_size=4 )
    # Reference behavior: the built-in dict.
    _UpperCamelCase = {}
    for _, (fun, *args) in enumerate(a__ ):
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        _UpperCamelCase , _UpperCamelCase = _run_operation(a__ , a__ , *a__ )
        assert my_res == py_res
        assert str(a__ ) == str(a__ )
        assert set(a__ ) == set(a__ )
        assert len(a__ ) == len(a__ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) ->None:
    '''Check that ``HashMap`` exposes no public name beyond the built-in
    ``dict`` API (its public surface must be a strict subset of dict's).

    Bug fixes: ``is_public`` ignored its parameter and read the undefined
    free name ``name``; both set comprehensions passed the undefined name
    ``a__`` instead of the comprehension variable; and the results were
    discarded into a throwaway local instead of the names the final
    assertion reads.
    '''
    def is_public(a__ ) -> bool:
        # A name is "public" unless it starts with an underscore.
        return not a__.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public names must strictly contain HashMap's public names.
    assert dict_public_names > hash_public_names