| code (string, 81 to 54k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
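# A minimal usage sketch (my addition, not part of the original file). It assumes
# `params` is a namespace exposing `max_model_input_size`, `mlm`, `is_master` and
# `special_tok_ids`, and that `data` is a list of numpy arrays of token ids:
#
#     from torch.utils.data import DataLoader
#     dataset = LmSeqsDataset(params=params, data=data)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#     token_ids, lengths = next(iter(loader))  # (bs, max_seq_len_), (bs,)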
| 715 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )
class CTRLModelTester:
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True,
        use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 80 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 716 |
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n, using a Sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
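# Quick sanity checks (my addition, not in the original solution): the sum of primes
# below 10 is 2 + 3 + 5 + 7 = 17, and the Project Euler #10 answer is 142913828922.
#
#     assert solution(10) == 17
#     assert solution() == 142913828922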
| 80 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Return the count of tile totals t <= t_limit for which the number of hollow
    square laminae that can be formed is between 1 and n_limit.
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
    print(f"{solution() = }")
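# A hedged brute-force cross-check (my addition, not in the original): each lamina with
# outer width w and hole width h (same parity) uses w*w - h*h tiles, so small limits can
# be verified directly against solution().
#
#     def brute(t_limit: int, n_limit: int = 10) -> int:
#         from collections import Counter
#         tiles = Counter()
#         for outer in range(3, t_limit):
#             for hole in range(outer - 2, 0, -2):
#                 t = outer * outer - hole * hole
#                 if t > t_limit:
#                     break
#                 tiles[t] += 1
#         return sum(1 for v in tiles.values() if 1 <= v <= n_limit)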
| 717 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs):
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
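# A minimal usage sketch (my addition; `model` is a hypothetical stand-in for any
# denoiser with a (sample, t) -> prediction signature, not a real checkpoint):
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample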
| 80 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the `starting_point` onwards by Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 718 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 80 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None,
        stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False,
        verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
| 719 |
from __future__ import annotations
def average(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> average([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
        use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 80 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 721 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Compare the values at index1 and index2 and swap them as per the given direction.
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    # Recursively sort a bitonic sequence: ascending if direction = 1, descending if 0.
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    # First produce a bitonic sequence by sorting the two halves in opposite orders,
    # then call bitonic_merge to put them in the same order.
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 80 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 700 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1,
        pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
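# A brief usage sketch (my addition): config objects like this are plain containers,
# so they can be instantiated and tweaked before building a model.
#
#     config = NezhaConfig(num_hidden_layers=6, hidden_dropout_prob=0.2)
#     print(config.hidden_size)  # 768 (the default)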
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
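# A hedged usage sketch (my addition; this mirrors the documented pattern for this
# pipeline family, and the model name is illustrative, not prescribed by this file):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])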
| 80 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('theta: ', theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
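    # Added sanity check (not in the original row): sigmoid_function(0) is exactly 0.5,
    # so the contour above traces the decision boundary where the model is maximally uncertain.
    assert sigmoid_function(0) == 0.5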
| 702 |
import math
def sieve(n: int):
    '''simple docstring'''
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
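# Added sanity check (not in the original row): the segmented sieve agrees with
# the primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]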
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_clip_fast'] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_clip'] = ['CLIPFeatureExtractor']
    _import_structure['image_processing_clip'] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clip'] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_clip'] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_clip'] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
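# Added usage sketch (an assumption based on the lazy-import pattern above; the
# package path is illustrative): importing a symbol only resolves the real
# submodule on first access, keeping `import transformers` cheap.
# from transformers.models.clip import CLIPModel, CLIPProcessor
# model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')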
| 703 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n')
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
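# Added note (not in the original row): assuming the standard transformers layout,
# this test file lives at tests/models/bartpho/test_tokenization_bartpho.py and runs with:
#   python -m pytest tests/models/bartpho/test_tokenization_bartpho.py -q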
| 80 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
'''simple docstring'''
__lowerCamelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowerCamelCase = 192
__lowerCamelCase = 768
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = [800, 1333]
__lowerCamelCase = False
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = 330
__lowerCamelCase = 14
__lowerCamelCase = 6
__lowerCamelCase = 1320
elif "yolos_s" in yolos_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 6
elif "yolos_b" in yolos_name:
__lowerCamelCase = [800, 1344]
__lowerCamelCase = 91
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """coco-detection-id2label.json"""
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
    __lowerCamelCase = {int(k ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def read_in_q_k_v(state_dict, config, base_model=False):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[-config.hidden_size :, :]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
'''simple docstring'''
if "backbone" in name:
__lowerCamelCase = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__lowerCamelCase = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__lowerCamelCase = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__lowerCamelCase = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__lowerCamelCase = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__lowerCamelCase = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
def convert_state_dict(orig_state_dict, model):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(A__ )
if "qkv" in key:
__lowerCamelCase = key.split(""".""" )
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[
dim : dim * 2, :
]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def prepare_img():
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
'''simple docstring'''
__lowerCamelCase = get_yolos_config(A__ )
# load original state_dict
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )["""model"""]
# load 🤗 model
__lowerCamelCase = YolosForObjectDetection(A__ )
model.eval()
__lowerCamelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
# Check outputs on an image, prepared by YolosImageProcessor
__lowerCamelCase = 800 if yolos_name != """yolos_ti""" else 512
__lowerCamelCase = YolosImageProcessor(format="""coco_detection""" , size=A__ )
__lowerCamelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowerCamelCase = model(**A__ )
__lowerCamelCase, __lowerCamelCase = outputs.logits, outputs.pred_boxes
__lowerCamelCase, __lowerCamelCase = None, None
if yolos_name == "yolos_ti":
__lowerCamelCase = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__lowerCamelCase = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__lowerCamelCase = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__lowerCamelCase = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__lowerCamelCase = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__lowerCamelCase = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__lowerCamelCase = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__lowerCamelCase = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__lowerCamelCase = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , A__ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , A__ , atol=1E-4 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
__lowerCamelCase = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
__lowerCamelCase = model_mapping[yolos_name]
image_processor.push_to_hub(A__ , organization="""hustvl""" )
model.push_to_hub(A__ , organization="""hustvl""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
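    # Added usage sketch (not in the original row; the script filename is an assumption):
    #   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
    #       --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small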
| 704 |
def check_cycle(graph: dict) -> bool:
    '''simple docstring'''
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
    from doctest import testmod
    testmod()
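    # Added usage example (not in the original row): the edge 2 -> 0 closes a cycle.
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))   # False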
| 80 | 0 |
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
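# Added usage sketch (not in the original row; the standard Accelerate entry point):
# from accelerate import Accelerator
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)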
| 705 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 80 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    '''simple docstring'''
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def lowerCamelCase__ ( A__ : int , A__ : Optional[Any] ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(_SCREAMING_SNAKE_CASE , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
__lowerCamelCase = args.hub_token if hasattr(_SCREAMING_SNAKE_CASE , """hub_token""" ) else None
__lowerCamelCase = get_full_repo_name(_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
__lowerCamelCase = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_SCREAMING_SNAKE_CASE , model_name=_SCREAMING_SNAKE_CASE , repo_name=_SCREAMING_SNAKE_CASE , dataset_name=args.dataset_name if hasattr(_SCREAMING_SNAKE_CASE , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_SCREAMING_SNAKE_CASE , """gradient_accumulation_steps""" ) else None
) , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(_SCREAMING_SNAKE_CASE , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_SCREAMING_SNAKE_CASE , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(_SCREAMING_SNAKE_CASE , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(_SCREAMING_SNAKE_CASE , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_SCREAMING_SNAKE_CASE , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_SCREAMING_SNAKE_CASE , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(_SCREAMING_SNAKE_CASE , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(_SCREAMING_SNAKE_CASE , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , )
__lowerCamelCase = os.path.join(args.output_dir , """README.md""" )
model_card.save(_SCREAMING_SNAKE_CASE )
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )
if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
            'the directory exists and can be written to.'
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
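# Added check of the helper above (not in the original row):
#   _add_variant('diffusion_pytorch_model.bin', 'fp16') == 'diffusion_pytorch_model.fp16.bin'
#   _add_variant('model.bin') == 'model.bin'  # no variant requested, name unchanged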
def _get_model_file(pretrained_model_name_or_path: str, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
'''simple docstring'''
__lowerCamelCase = str(_SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
return pretrained_model_name_or_path
elif os.path.isdir(_SCREAMING_SNAKE_CASE ):
if os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
# Load from a PyTorch checkpoint
__lowerCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
__lowerCamelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_SCREAMING_SNAKE_CASE ).base_version ) >= version.parse("""0.20.0""" )
):
try:
__lowerCamelCase = hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
warnings.warn(
f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , _SCREAMING_SNAKE_CASE , )
return model_file
except: # noqa: E722
warnings.warn(
f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}\' so that the correct variant file can be added.' , _SCREAMING_SNAKE_CASE , )
try:
# 2. Load model file as usual
__lowerCamelCase = hf_hub_download(
_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , subfolder=_SCREAMING_SNAKE_CASE , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError):
            _ = act2.a
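# Added reference sketch (an assumption: this matches the common GPT-2 "gelu_new"
# tanh approximation, not necessarily transformers' exact implementation):
#   gelu_new(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))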
| 80 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowerCamelCase = getattr(A__ , A__ )
if weight_type is not None:
__lowerCamelCase = getattr(A__ , A__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "running_mean":
__lowerCamelCase = value
elif weight_type == "running_var":
__lowerCamelCase = value
elif weight_type == "num_batches_tracked":
__lowerCamelCase = value
elif weight_type == "inv_freq":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == """group""" , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
                    __lowerCamelCase = name.split(key )[0].split(""".""" )[-2]
__lowerCamelCase = mapped_key.replace("""*""" , A__ )
if "pos_bias_u" in name:
__lowerCamelCase = None
elif "pos_bias_v" in name:
__lowerCamelCase = None
elif "weight_g" in name:
__lowerCamelCase = """weight_g"""
elif "weight_v" in name:
__lowerCamelCase = """weight_v"""
elif "bias" in name:
__lowerCamelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = """weight"""
elif "running_mean" in name:
__lowerCamelCase = """running_mean"""
elif "inv_freq" in name:
__lowerCamelCase = """inv_freq"""
elif "running_var" in name:
__lowerCamelCase = """running_var"""
elif "num_batches_tracked" in name:
__lowerCamelCase = """num_batches_tracked"""
else:
__lowerCamelCase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
__lowerCamelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCamelCase = name.split(""".""" )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
__lowerCamelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
__lowerCamelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
__lowerCamelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
__lowerCamelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(A__ )
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
if config_path is not None:
__lowerCamelCase = WavaVecaConformerConfig.from_pretrained(A__ , hidden_act="""swish""" )
else:
__lowerCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowerCamelCase = """rotary"""
if is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(A__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(A__ , """vocab.json""" )
if not os.path.isdir(A__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(A__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(A__ , A__ )
__lowerCamelCase = WavaVecaCTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=A__ , )
__lowerCamelCase = True if config.feat_extract_norm == """layer""" else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
__lowerCamelCase = WavaVecaConformerForCTC(A__ )
else:
__lowerCamelCase = WavaVecaConformerForPreTraining(A__ )
if is_finetuned:
__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task="""audio_pretraining""" )
__lowerCamelCase = fairseq.tasks.setup_task(A__ )
__lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ )
__lowerCamelCase = model[0].eval()
recursively_load_weights(A__ , A__ , not is_finetuned )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
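    # Added usage sketch (not in the original row; the script filename is an assumption):
    #   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path /path/to/checkpoint.pt --pytorch_dump_folder_path ./wav2vec2-conformer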
| 707 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 1_28
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=5_12)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=1_28)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 5_12 for x in inputs.input_ids)
            assert all(len(x) == 1_28 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
| 80 | 0 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()
    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")
    def empty(self):
        return len(self.elements) == 0
    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))
    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))
    def top_show(self):
        return self.elements[0][1]
    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
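# Added usage sketch (not in the original row): the heap orders by priority,
# smallest first, and `put` on an existing item re-prioritizes it.
pq = PriorityQueue()
pq.put((0, 0), 2.0)
pq.put((1, 1), 1.0)
assert pq.top_show() == (1, 1)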
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_1(p: TPos, goal: TPos):
    # euclidean distance scaled down by the time variable t
    return consistent_heuristic(p, goal) // t
def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
__lowerCamelCase = np.chararray((n, n) )
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
__lowerCamelCase = '*'
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (j, (n - 1) - i) in blocks:
__lowerCamelCase = '#'
__lowerCamelCase = '-'
__lowerCamelCase = back_pointer[goal]
while x != start:
(__lowerCamelCase) = x
# print(x)
__lowerCamelCase = '-'
__lowerCamelCase = back_pointer[x]
__lowerCamelCase = '-'
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowerCamelCase = back_pointer[goal]
while x != start:
print(lowerCAmelCase__ , end=""" """ )
__lowerCamelCase = back_pointer[x]
print(lowerCAmelCase__ )
sys.exit()
def valid(p: TPos):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ):
for itera in range(lowerCAmelCase__ ):
open_list[itera].remove_element(lowerCAmelCase__ )
# print("s", s)
# print("j", j)
(__lowerCamelCase) = s
__lowerCamelCase = (x - 1, y)
__lowerCamelCase = (x + 1, y)
__lowerCamelCase = (x, y + 1)
__lowerCamelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCAmelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCAmelCase__ )
__lowerCamelCase = -1
__lowerCamelCase = float("""inf""" )
if valid(lowerCAmelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__lowerCamelCase = g_function[s] + 1
__lowerCamelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCAmelCase__ , key(lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCAmelCase__ ):
if key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) <= Wa * key(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ):
open_list[j].put(
lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
def make_common_ground():
__lowerCamelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
__lowerCamelCase = {start: 0, goal: float("""inf""" )}
__lowerCamelCase = {start: -1, goal: -1}
__lowerCamelCase = []
__lowerCamelCase = set()
for i in range(lowerCAmelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowerCamelCase = []
__lowerCamelCase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCAmelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__lowerCamelCase = open_list[i].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_inad.append(lowerCAmelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
__lowerCamelCase = open_list[0].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_anchor.append(lowerCAmelCase__ )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCAmelCase__ ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 708 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
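# Illustration: this is a depth-first flood fill over 8-connected cells, so for
# graph = [[1, 0], [0, 1]] the two diagonal 1-cells merge into a single island
# in the count computed below.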
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
| 80 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase_ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: List[Any] = None , UpperCamelCase_: Dict = None , UpperCamelCase_: int = True , ):
__lowerCamelCase = [file for file in os.listdir(lowerCamelCase_ ) if os.path.isfile(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )]
if identifier is not None:
__lowerCamelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
for n_ in n_identifier:
__lowerCamelCase = [file for file in files if n_ not in file]
else:
__lowerCamelCase = [file for file in files if n_identifier not in file]
__lowerCamelCase = ignore_files or []
ignore_files.append("""__init__.py""" )
__lowerCamelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCamelCase_ )
if only_modules:
__lowerCamelCase = file.split(""".""" )[0]
try:
__lowerCamelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
__lowerCamelCase = doctest.DocTestSuite(lowerCamelCase_ )
__lowerCamelCase = unittest.TextTestRunner().run(lowerCamelCase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
__lowerCamelCase = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = Path("""src/transformers""" )
__lowerCamelCase = """modeling"""
__lowerCamelCase = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase_ , identifier=lowerCamelCase_ , ignore_files=lowerCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = Path("""src/transformers""" )
__lowerCamelCase = """tokenization"""
self.analyze_directory(lowerCamelCase_ , identifier=lowerCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = Path("""src/transformers""" )
__lowerCamelCase = """configuration"""
self.analyze_directory(lowerCamelCase_ , identifier=lowerCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = Path("""src/transformers""" )
__lowerCamelCase = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase_ , n_identifier=lowerCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = Path("""docs/source""" )
__lowerCamelCase = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase_ , ignore_files=lowerCamelCase_ , only_modules=lowerCamelCase_ )
| 709 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
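# e.g. "refinenet4" -> "fusion_stage.layers.0" and "refinenet1" -> "fusion_stage.layers.3"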
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
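# For reference: timm fuses the three attention projections into one matrix of
# shape (3 * hidden_size, hidden_size); the row blocks sliced above are the
# query, key and value weights, in that order.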
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 | 0 |
def lowerCamelCase__ ( A__ : str , A__ : int ):
'''simple docstring'''
__lowerCamelCase = word.split()
def justify(A__ : list , A__ : int , A__ : int ) -> str:
__lowerCamelCase = max_width - width
__lowerCamelCase = len(A__ )
if len(A__ ) == 1:
# if there is only one word on the line,
# pad the remainder of the line with overall_spaces_count spaces
return line[0] + " " * overall_spaces_count
else:
__lowerCamelCase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__lowerCamelCase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__lowerCamelCase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute the leftover spaces round-robin to the leftmost gaps
for i in range(A__ ):
num_spaces_between_words_list[i] += 1
__lowerCamelCase = []
for i in range(A__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(A__ )
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = 0
for word in words:
if width + len(A__ ) + len(A__ ) <= max_width:
# keep adding words while the line still fits within max_width
# width = sum of the lengths of all words already on the line (no spaces)
# len(word) = length of the current word
# len(line) = number of single spaces needed between the words so far
line.append(A__ )
width += len(A__ )
else:
# justify the line and add it to result
answer.append(justify(A__ , A__ , A__ ) )
# reset new line and new width
__lowerCamelCase, __lowerCamelCase = [word], len(A__ )
__lowerCamelCase = max_width - width - len(A__ )
answer.append(""" """.join(A__ ) + (remaining_spaces + 1) * """ """ )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 710 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
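# This mapping plays the role of the lazy _import_structure handed to
# _LazyModule at the bottom of the file; backend-specific entries are appended
# below only when the vision / torch backends are available.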
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
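# The "batch" / "sequence" (and "choice") axes above are marked dynamic so the
# exported ONNX graph accepts variable batch sizes and sequence lengths.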
| 80 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase__( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionPanoramaPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCamelCase = DDIMScheduler()
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(_UpperCAmelCase )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
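# The components above are deliberately tiny (32/64 channels, a 5-layer CLIP
# text encoder) so the fast pipeline tests below stay cheap enough for CPU.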
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str]=0 ):
__lowerCamelCase = torch.manual_seed(_UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(_UpperCAmelCase )
__lowerCamelCase = sd_pipe(**_UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Dict ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: List[Any] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(_UpperCAmelCase )
__lowerCamelCase = '''french fries'''
__lowerCamelCase = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(_UpperCAmelCase )
__lowerCamelCase = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__lowerCamelCase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(_UpperCAmelCase )
__lowerCamelCase = sd_pipe(**_UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_UpperCAmelCase )
__lowerCamelCase = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(_UpperCAmelCase )
__lowerCamelCase = sd_pipe(**_UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[int]=0 ):
__lowerCamelCase = torch.manual_seed(_UpperCAmelCase )
__lowerCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = '''stabilityai/stable-diffusion-2-base'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**_UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowerCamelCase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_UpperCAmelCase )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**_UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
__lowerCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = 0
def callback_fn(UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] ) -> None:
__lowerCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__lowerCamelCase = False
__lowerCamelCase = '''stabilityai/stable-diffusion-2-base'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
__lowerCamelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self: Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = '''stabilityai/stable-diffusion-2-base'''
__lowerCamelCase = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
__lowerCamelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**_UpperCAmelCase )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 712 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
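# Background for the search below: an a x b grid contains T(a) * T(b)
# rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle number, so we
# look for the pair of triangle numbers whose product is closest to `target`.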
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than or equal to b_estimate
__lowerCamelCase = 42
# the smallest integer greater than or equal to b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
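# b_estimate solves T(b) = target / triangle_a, i.e.
# b * (b + 1) / 2 = target / triangle_a, via the quadratic formula.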
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class lowerCamelCase__( _a):
UpperCAmelCase__ : List[Any] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
UpperCAmelCase__ : Any = field(default=_a , metadata={'help': 'Whether to SortishSamler or not.'})
UpperCAmelCase__ : Optional[Any] = field(
default=_a , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
UpperCAmelCase__ : Union[str, Any] = field(default=_a , metadata={'help': 'whether to use adafactor'})
UpperCAmelCase__ : List[Any] = field(
default=_a , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
UpperCAmelCase__ : Union[str, Any] = field(
default=_a , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
UpperCAmelCase__ : Dict = field(default=_a , metadata={'help': 'Dropout probability. Goes into model.config.'})
UpperCAmelCase__ : List[Any] = field(
default=_a , metadata={'help': 'Attention dropout probability. Goes into model.config.'})
UpperCAmelCase__ : str = field(
default='linear' , metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'''} , )
| 713 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
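# U-Net skip connection: the stored encoder activation is concatenated with the
# decoder activation along the channel axis (NHWC layout, hence axis=-1) before
# the resnet block consumes it.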
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ = 250_004
UpperCAmelCase_ = 250_020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = MBartaaTokenizer
UpperCAmelCase__ : Optional[int] = MBartaaTokenizerFast
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Optional[int] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = MBartaaTokenizer(__UpperCamelCase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """<s>"""
__lowerCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__UpperCamelCase ) , 10_54 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = MBartaaTokenizer(__UpperCamelCase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=__UpperCamelCase )
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCamelCase = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
# fmt: off
__lowerCamelCase = {"""input_ids""": [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def lowerCAmelCase__ ( self: Dict ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowerCamelCase = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCamelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__lowerCamelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(__UpperCamelCase )
__lowerCamelCase = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saves the same files, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowerCamelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(__UpperCamelCase )
__lowerCamelCase = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=True
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
__lowerCamelCase = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saves the same files
self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(__UpperCamelCase )
__lowerCamelCase = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
# Save tokenizer rust, legacy_format=False
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
__lowerCamelCase = tokenizer_p.save_pretrained(__UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCamelCase = tokenizer_r.from_pretrained(__UpperCamelCase )
__lowerCamelCase = tokenizer_p.from_pretrained(__UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : str = 'facebook/mbart-large-50-one-to-many-mmt'
UpperCAmelCase__ : str = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
UpperCAmelCase__ : Optional[int] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
UpperCAmelCase__ : Dict = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__lowerCamelCase = 1
return cls
def lowerCAmelCase__ ( self: Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 25_00_38 )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
def lowerCAmelCase__ ( self: Tuple ):
self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
__lowerCamelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowerCamelCase = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
__lowerCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __UpperCamelCase )
__lowerCamelCase = 10
__lowerCamelCase = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , __UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
def lowerCAmelCase__ ( self: Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_00_53, 25_00_01] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCamelCase )
__lowerCamelCase = MBartaaTokenizer.from_pretrained(__UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCamelCase )
@require_torch
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="""pt""" )
__lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
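        # MBart's shift_tokens_right wraps each row's final non-pad token (EOS here) to
        # the front, so decoder inputs start with [eos (2), tgt_lang_code], as asserted below.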
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__lowerCamelCase = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowerCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer(self.src_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=3 , return_tensors="""pt""" )
__lowerCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=10 , return_tensors="""pt""" )
__lowerCamelCase = targets["""input_ids"""]
__lowerCamelCase = shift_tokens_right(__UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[25_00_04, 62, 30_34, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_00_01,
} , )
| 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
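    # Reuse the (shared) embedding table as the weight of an output projection,
    # mirroring fairseq's tying of input embeddings and the LM head.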
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'`fairseq_output` shape and `new_model_outputs` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape=}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase__ ( A__ : str=2 , A__ : Any=3 , A__ : int=16 , A__ : int = 10 , A__ : int = 2 ):
'''simple docstring'''
def get_dataset(A__ : List[Any] ):
__lowerCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_UpperCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__lowerCamelCase = get_dataset(_UpperCamelCase )
__lowerCamelCase = get_dataset(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
__lowerCamelCase = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase__ ( A__ : str , A__ : Dict , A__ : Optional[int] , A__ : Optional[int] , A__ : str , A__ : Tuple=None ):
'''simple docstring'''
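    # Run a few epochs and record one random draw per batch; the checkpointing tests
    # compare these sequences across save/load cycles to verify deterministic resumption.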
__lowerCamelCase = []
for epoch in range(_UpperCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
__lowerCamelCase, __lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase = torch.nn.functional.mse_loss(_UpperCamelCase , _UpperCamelCase )
accelerator.backward(_UpperCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase__( nn.Module):
def __init__( self: List[str] ):
super().__init__()
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any ):
return x * self.a + self.b
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCAmelCase__ ( self: Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
# Train baseline
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
__lowerCamelCase = os.path.join(UpperCamelCase_ , """initial""" )
accelerator.save_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
__lowerCamelCase = os.path.join(UpperCamelCase_ , """checkpoint""" )
accelerator.save_state(UpperCamelCase_ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase_ )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ )
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = torch.tensor([1, 2, 3] )
__lowerCamelCase = torch.tensor([2, 3, 4] )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(net.parameters() )
__lowerCamelCase = Accelerator()
with self.assertRaises(UpperCamelCase_ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowerCAmelCase__ ( self: Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
__lowerCamelCase = scheduler.state_dict()
train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(UpperCamelCase_ , scheduler.state_dict() )
def lowerCAmelCase__ ( self: str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
            # Save 11 states (total_limit=2 keeps only the most recent two):
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ = '/tmp/accelerate/state_checkpointing'
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCAmelCase_ , UpperCAmelCase_ = dummy_dataloaders()
UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
UpperCAmelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 715 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCamelCase__( __UpperCAmelCase , unittest.TestCase):
UpperCAmelCase__ : str = RoFormerTokenizer
UpperCAmelCase__ : Tuple = RoFormerTokenizerFast
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : int = True
def lowerCAmelCase__ ( self: List[str] ):
super().setUp()
def lowerCAmelCase__ ( self: Any , **UpperCamelCase_: Dict ):
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , **UpperCamelCase_: List[str] ):
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = """永和服装饰品有限公司,今天天气非常好"""
__lowerCamelCase = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase, __lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase, __lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
pass
def lowerCAmelCase__ ( self: List[str] ):
pass
def lowerCAmelCase__ ( self: Dict ):
pass | 716 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
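    # Sieve of Eratosthenes: 0 in primality_list marks an index as prime, 1 as
    # composite; only multiples of primes up to sqrt(n) need to be flagged.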
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
UpperCAmelCase_ = 256
# Modulus to hash a string
UpperCAmelCase_ = 1_000_003
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = len(A__ )
__lowerCamelCase = len(A__ )
if p_len > t_len:
return False
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = 1
    # Compute the hashes of the pattern and of the text's first window
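    # Polynomial hash: hash(s) = sum(ord(s[k]) * alphabet_size**(len(s)-1-k)) % modulus.
    # modulus_power accumulates alphabet_size**(p_len - 1) % modulus, the weight of the
    # window's leading character, which the rolling update below must subtract.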
for i in range(A__ ):
__lowerCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__lowerCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__lowerCamelCase = (modulus_power * alphabet_size) % modulus
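    # Slide a window of length p_len over the text: compare hashes first, and only do a
    # direct string comparison on a hash match to rule out collisions.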
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash): drop the
        # leading character's contribution, shift left, and add the incoming character
__lowerCamelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = '''abc1abc12'''
__lowerCamelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__lowerCamelCase = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(A__ , A__ ) and not rabin_karp(A__ , A__ )
# Test 2)
__lowerCamelCase = '''ABABX'''
__lowerCamelCase = '''ABABZABABYABABX'''
assert rabin_karp(A__ , A__ )
# Test 3)
__lowerCamelCase = '''AAAB'''
__lowerCamelCase = '''ABAAAAAB'''
assert rabin_karp(A__ , A__ )
# Test 4)
__lowerCamelCase = '''abcdabcy'''
__lowerCamelCase = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(A__ , A__ )
# Test 5)
__lowerCamelCase = '''Lü'''
__lowerCamelCase = '''Lüsai'''
assert rabin_karp(A__ , A__ )
__lowerCamelCase = '''Lue'''
assert not rabin_karp(A__ , A__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 717 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
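        # Linear multistep (Adams-Bashforth style) combination of buffered model
        # outputs; fall back to lower orders until enough evaluations are available.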
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
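        # Invert sample = alpha * x0 + sigma * eps to estimate x0, then re-mix that
        # estimate with the previous timestep's alpha/sigma to take the step.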
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
| 80 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = 9, 14 # noqa: F841
__lowerCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCamelCase = defaultdict(snake_case_ )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
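    # The graph is undirected, so every edge is inserted in both adjacency lists.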
__lowerCamelCase = mst(snake_case_ )
__lowerCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
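    # Prim's may report an edge from either endpoint, so each expected edge is
    # accepted in both orientations.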
for answer in expected:
__lowerCamelCase = tuple(answer[:2] )
__lowerCamelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 718 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
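    # Depth 0 renders as a level-2 markdown heading ("\n##"); deeper paths become
    # indented bullet list items.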
    return f'{i * "  "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase__( unittest.TestCase):
def __init__( self: int , UpperCamelCase_: Tuple , UpperCamelCase_: Any=7 , UpperCamelCase_: str=3 , UpperCamelCase_: int=18 , UpperCamelCase_: Union[str, Any]=30 , UpperCamelCase_: List[Any]=4_00 , UpperCamelCase_: str=True , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=True , ):
__lowerCamelCase = size if size is not None else {"""height""": 18, """width""": 18}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = do_normalize
def lowerCAmelCase__ ( self: Union[str, Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase__( lowercase_ , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self: Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , """clusters""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase_ , """do_normalize""" ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(UpperCamelCase_ , """image_processor.json""" )
image_processor_first.to_json_file(UpperCamelCase_ )
__lowerCamelCase = self.image_processing_class.from_json_file(UpperCamelCase_ ).to_dict()
__lowerCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = self.image_processing_class.from_pretrained(UpperCamelCase_ ).to_dict()
__lowerCamelCase = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
__lowerCamelCase = Image.open(dataset[4]["""file"""] )
__lowerCamelCase = Image.open(dataset[5]["""file"""] )
__lowerCamelCase = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
__lowerCamelCase = prepare_images()
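        # ImageGPT maps each pixel to its nearest color cluster, so the resulting
        # input_ids are cluster indices rather than subword tokens.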
# test non-batched
__lowerCamelCase = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
__lowerCamelCase = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCamelCase_ )
# test batched
__lowerCamelCase = image_processing(UpperCamelCase_ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
__lowerCamelCase = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCamelCase_ )
| 719 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
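    # Forward elimination: scale each row so its leading coefficient is 1, subtract it
    # from the first row to cancel the leading column, then recurse on the reduced
    # system until rows are 3 wide (two unknowns plus the constant).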
__lowerCamelCase = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
__lowerCamelCase = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
__lowerCamelCase = column
continue
__lowerCamelCase = column / magnitude
# Subtract to cancel term
__lowerCamelCase = current_set[0]
__lowerCamelCase = [first_row]
__lowerCamelCase = current_set[1::]
for row in current_set:
__lowerCamelCase = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__lowerCamelCase = final_set[0]
__lowerCamelCase = []
__lowerCamelCase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__lowerCamelCase = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCamelCase__ )
__lowerCamelCase = resultant
return final_set
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
__lowerCamelCase = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCamelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
__lowerCamelCase = equations.copy()
if any(0 in row for row in data_set ):
__lowerCamelCase = data_set.copy()
__lowerCamelCase = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
__lowerCamelCase = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCamelCase__ )
__lowerCamelCase = data_set.copy()
__lowerCamelCase = simplify(lowerCamelCase__ )
__lowerCamelCase = simplified[::-1]
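    # Back substitution: walk the triangular system from the last equation upward,
    # substituting the solutions found so far into each remaining row.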
__lowerCamelCase = []
for row in simplified:
__lowerCamelCase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__lowerCamelCase = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
__lowerCamelCase = temp_row[1::]
__lowerCamelCase = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
__lowerCamelCase = []
for item in solutions:
final.append(float(round(lowerCamelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 0 |
UpperCAmelCase_ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 721 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
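    # direction == 1 requests ascending order, direction == 0 descending; swap the
    # pair whenever it violates the requested order.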
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
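# A usage sketch (assumes the functions above). Bitonic sort is a sorting
# network, so it only guarantees a fully sorted result when the number of
# elements is a power of two:
data = [12, 42, -21, 1]  # length 4, a power of two
bitonic_sort(data, 0, len(data), 1)
assert data == [-21, 1, 12, 42]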
| 80 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra 5-way classification head on top of the QA outputs
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        """Cross entropy over one-hot encoded labels."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
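# A quick sanity check of the schedule assembled by `build_tx` above (a sketch
# with illustrative hyper-parameters, not values from the script):
# `optax.join_schedules` hands off from the warmup ramp to the linear decay
# exactly at `warmup_steps`, so the peak learning rate occurs at that boundary.
if __name__ == "__main__":
    _, demo_lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=0.0095)
    assert float(demo_lr(0)) == 0.0  # start of warmup
    assert abs(float(demo_lr(100)) - 3e-5) < 1e-10  # peak at the boundary
    assert float(demo_lr(1000)) < 3e-5  # decayed past the peak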
| 700 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
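# A minimal usage sketch (assumes the class above): construct the config with
# defaults, overriding a couple of fields, as with any PretrainedConfig.
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=384, num_hidden_layers=6)
    assert config.model_type == "nezha"
    assert config.max_relative_position == 64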
| 80 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
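# With the _LazyModule pattern above, the heavy modeling submodules are only
# imported on first attribute access. A small sketch of the effect (assumes
# transformers and torch are installed; the attribute lookup below is what
# triggers the real import):
#
#     import importlib
#     speech_mod = importlib.import_module("transformers.models.speech_encoder_decoder")
#     print(speech_mod.SpeechEncoderDecoderModel)  # lazily resolved here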
| 701 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
            conversation_id = uuid.uuid4()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
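# A minimal usage sketch of the pipeline above (in the real transformers source
# the conversation class is named `Conversation` and the pipeline
# `ConversationalPipeline`; downloading "microsoft/DialoGPT-small" is assumed):
#
#     from transformers import Conversation, pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Going to the movies tonight, any suggestions?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])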
| 80 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
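# A single-process, CPU-only variant of the same health check (a hedged sketch,
# useful for verifying the logic without GPUs; uses the gloo backend and a
# world size of 1, so the all_reduce sum is a no-op):
#
#     import os
#     import torch
#     import torch.distributed as dist
#
#     os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
#     os.environ.setdefault("MASTER_PORT", "29500")
#     dist.init_process_group("gloo", rank=0, world_size=1)
#     t = torch.ones(1)
#     dist.all_reduce(t, op=dist.ReduceOp.SUM)
#     assert t.item() == 1.0
#     dist.destroy_process_group()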
| 702 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # sieve the first segment [2, sqrt(n)] directly
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range in segments of size sqrt(n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
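# A quick cross-check of the segmented sieve against trial division (assumes
# `sieve` above):
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]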
| 80 | 0 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 703 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 80 | 0 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x = r1 (mod n1), x = r2 (mod n2) for coprime n1 and n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as above, but built from modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
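    # Worked example (assumes the functions above): find x with x = 1 (mod 5)
    # and x = 3 (mod 7). The unique solution modulo 35 is 31, and both
    # variants agree:
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31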
| 704 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # Keep track of visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge to a vertex on the current stack."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
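    # Usage sketch (assumes the functions above): adjacency dicts keyed by node.
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True  # 0 -> 1 -> 2 -> 0
    assert check_cycle({0: [1], 1: [2], 2: []}) is False  # acyclic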
| 80 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
| 705 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined multiset of the two input arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 80 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Restore the min-heap property downwards from `start`."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Update function if the value of any node in the min-heap decreases."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
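    # A non-interactive usage sketch (assumes the definitions above): for the
    # triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), the MST keeps the two lighter
    # edges:
    #
    #     demo = defaultdict(list)
    #     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    #         demo[u].append([v, w])
    #         demo[v].append([u, w])
    #     assert sorted(prisms_algorithm(demo)) == [(0, 1), (1, 2)]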
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 80 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge: destination vertex and a weight in {0, 1}."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
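    # Usage sketch (assumes the classes above): 0-weight edges make this a
    # deque-based Dijkstra specialised to weights {0, 1}.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1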
| 707 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
| 80 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[int]=99 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: int=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: str=16 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: int=3 , UpperCamelCase_: Union[str, Any]=4 , UpperCamelCase_: Dict=None , UpperCamelCase_: Dict=10_00 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = range_bbox
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowerCamelCase = bbox[i, j, 3]
__lowerCamelCase = bbox[i, j, 1]
__lowerCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowerCamelCase = bbox[i, j, 2]
__lowerCamelCase = bbox[i, j, 0]
__lowerCamelCase = t
__lowerCamelCase = tf.convert_to_tensor(UpperCamelCase_ )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: List[Any] ):
__lowerCamelCase = TFLayoutLMModel(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] ):
__lowerCamelCase = TFLayoutLMForMaskedLM(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Any ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMForSequenceClassification(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = TFLayoutLMForTokenClassification(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = TFLayoutLMForQuestionAnswering(config=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : str = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Any = 10
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = TFLayoutLMModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Dict ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TFLayoutLMModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""Onnx compliancy broke with TF 2.10""" )
def lowerCAmelCase__ ( self: Dict ):
pass
def lowerCamelCase__ ( ):
__lowerCamelCase = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__lowerCamelCase = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__lowerCamelCase = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
return input_ids, attention_mask, bbox, token_type_ids, labels
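# Note: LayoutLM expects each bounding box as [x0, y0, x1, y1] normalized to a
# 0-1000 scale relative to the page size, which is why every literal above lies
# in [0, 1000]. A typical normalization helper looks like the sketch below
# (illustrative only -- this helper is not part of the test file itself):
def _normalize_bbox(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]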
@require_tf
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = TFLayoutLMModel.from_pretrained("""microsoft/layoutlm-base-uncased""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
# test the sequence output on [0, :3, :3]
__lowerCamelCase = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1E-3 ) )
# test the pooled output on [1, :3]
__lowerCamelCase = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCamelCase_ , atol=1E-3 ) )
@slow
def lowerCAmelCase__ ( self: Dict ):
# initialize model with randomly initialized sequence classification head
__lowerCamelCase = TFLayoutLMForSequenceClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=2 )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
__lowerCamelCase = outputs.loss
__lowerCamelCase = (2,)
self.assertEqual(loss.shape , UpperCamelCase_ )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = (2, 2)
self.assertEqual(logits.shape , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
# initialize model with randomly initialized token classification head
__lowerCamelCase = TFLayoutLMForTokenClassification.from_pretrained("""microsoft/layoutlm-base-uncased""" , num_labels=13 )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(
input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
# test the shape of the logits
__lowerCamelCase = outputs.logits
__lowerCamelCase = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: str ):
# initialize model with randomly initialized token classification head
__lowerCamelCase = TFLayoutLMForQuestionAnswering.from_pretrained("""microsoft/layoutlm-base-uncased""" )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = prepare_layoutlm_batch_inputs()
# forward pass
__lowerCamelCase = model(input_ids=UpperCamelCase_ , bbox=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
# test the shape of the logits
__lowerCamelCase = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , UpperCamelCase_ )
self.assertEqual(outputs.end_logits.shape , UpperCamelCase_ )
| 708 |
class lowerCamelCase__: # Public class to implement a graph
def __init__( self: Dict , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
__lowerCamelCase = row
__lowerCamelCase = col
__lowerCamelCase = graph
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[list[bool]] ):
# Checking all 8 elements surrounding nth element
__lowerCamelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__lowerCamelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
__lowerCamelCase = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ): # And finally, count all islands.
__lowerCamelCase = [[False for j in range(self.COL )] for i in range(self.ROW )]
__lowerCamelCase = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
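# Example usage (identifiers above were machine-renamed, so this mirrors the
# original structure rather than the exact names): with the constructor taking
# rows, cols and the grid, a grid such as
#   [[1, 1, 0],
#    [0, 0, 0],
#    [0, 0, 1]]
# yields 2 islands, since counting uses 8-connectivity (the two 1s in the top
# row touch, and the lone 1 in the corner is separate).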
| 80 | 0 |
from functools import lru_cache
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = 2
__lowerCamelCase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(A__ )
if n > 1:
factors.add(A__ )
return factors
@lru_cache
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
return len(unique_prime_factors(A__ ) )
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
return len(set(A__ ) ) in (0, 1)
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = 2
while True:
# Increment each value of a generated range
__lowerCamelCase = [base + i for i in range(A__ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowerCamelCase = [upf_len(A__ ) for x in group]
checker.append(A__ )
# If all numbers in the list are equal, return the group variable.
if equality(A__ ):
return group
# Increment our base variable by 1
base += 1
def lowerCamelCase__ ( A__ : int = 4 ):
'''simple docstring'''
__lowerCamelCase = run(A__ )
return results[0] if len(A__ ) else None
if __name__ == "__main__":
print(solution())
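# Worked example for n=3, from the problem statement of Project Euler 47:
# 644 = 2^2 x 7 x 23, 645 = 3 x 5 x 43 and 646 = 2 x 17 x 19 are the first
# three consecutive integers with three distinct prime factors each, so
# run(3) returns [644, 645, 646]. (In the upstream source the mangled append
# above adds n itself to checker, which is what forces every count in the
# group to equal n.)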
| 709 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
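# e.g. refinenet1 -> fusion_stage.layers.3, refinenet2 -> layers.2,
# refinenet3 -> layers.1, refinenet4 -> layers.0, via abs(layer_idx - 4).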
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
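# Row layout of the fused qkv matrix, with H = config.hidden_size: rows [0, H)
# hold the query projection, rows [H, 2H) the key projection and rows [2H, 3H)
# the value projection -- exactly the three slices taken above.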
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
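# Example invocation (the script filename is illustrative; the flags are the
# ones defined by the parser above):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large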
| 80 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'lilt'
def __init__( self: Dict , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: List[str]=7_68 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Optional[Any]=30_72 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Dict=5_12 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: str=0.02 , UpperCamelCase_: List[Any]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Dict=10_24 , **UpperCamelCase_: Optional[int] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = classifier_dropout
__lowerCamelCase = channel_shrink_ratio
__lowerCamelCase = max_ad_position_embeddings
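# Note (reconstruction): the last two attributes appear to correspond to
# channel_shrink_ratio and max_2d_position_embeddings in the upstream LiLT
# config -- digits were mangled to letters in this dump; the latter (default
# 1024) bounds the normalized 2D layout coordinates the model accepts.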
| 710 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 711 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
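# The mapping above declares which input axes are dynamic for ONNX export:
# axis 0 (batch) and the sequence axis -- plus a choice axis for the
# multiple-choice task -- so exported graphs accept variable shapes.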
| 80 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
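# -100 is the default ignore_index of PyTorch's cross-entropy loss, so the
# padded label positions above are excluded from the loss computation.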
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
| 712 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
# the smallest integer greater than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
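# Derivation of the estimate: we want triangle_a * b * (b + 1) / 2 ~= target,
# i.e. b^2 + b - 2 * target / triangle_a = 0, whose positive root is
# b = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2, as computed above.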
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( A__ : bytes ):
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
"""Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set("""0123456789ABCDEF""" ):
raise ValueError(
"""Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
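# Worked example as a quick sanity check, using stdlib equivalents (the two
# functions above were machine-renamed to the same identifier, so the second
# definition shadows the first):
assert b"Hello".hex().upper() == "48656C6C6F"
assert bytes.fromhex("48656C6C6F") == b"Hello"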
| 713 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
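# Minimal plain-Python sketch of the skip-connection bookkeeping above: down
# blocks append hidden states to a tuple, and each up-block resnet pops from
# the end, so skips are consumed in reverse (U-Net) order.
skips = ("down_0", "down_1", "down_2")
consumed = []
while skips:
    last, skips = skips[-1], skips[:-1]  # mirrors res_hidden_states_tuple[-1] / [:-1]
    consumed.append(last)
assert consumed == ["down_2", "down_1", "down_0"]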
| 80 | 0 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase__ ( A__ : Tuple , A__ : str , A__ : Any , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = s.rsplit(A__ , A__ )
return new.join(A__ )
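# e.g. a right-limited replace: rreplace("blocks.0.w", ".w", ".weight", 1)
# -> "blocks.0.weight", since rsplit from the right confines the replacement
# to the last occurrence.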
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase__ ( A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__lowerCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__lowerCamelCase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
__lowerCamelCase = rreplace(A__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
__lowerCamelCase = rreplace(A__ , """.b""" , """.bias""" , 1 )
__lowerCamelCase = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : int=None , A__ : Optional[Any]=True ):
'''simple docstring'''
from dall_e import Encoder
__lowerCamelCase = Encoder()
if os.path.exists(A__ ):
__lowerCamelCase = torch.load(A__ )
else:
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ )
if isinstance(A__ , A__ ):
__lowerCamelCase = ckpt.state_dict()
encoder.load_state_dict(A__ )
if config_path is not None:
__lowerCamelCase = FlavaImageCodebookConfig.from_pretrained(A__ )
else:
__lowerCamelCase = FlavaImageCodebookConfig()
__lowerCamelCase = FlavaImageCodebook(A__ ).eval()
__lowerCamelCase = encoder.state_dict()
__lowerCamelCase = upgrade_state_dict(A__ )
hf_model.load_state_dict(A__ )
__lowerCamelCase = hf_model.state_dict()
__lowerCamelCase = count_parameters(A__ )
__lowerCamelCase = count_parameters(A__ )
assert torch.allclose(A__ , A__ , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(A__ )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
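# i.e. the LM head becomes a bias-free nn.Linear whose weight is the shared
# (vocab_size x hidden_size) embedding table -- weight tying between the input
# embeddings and the output projection.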
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
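# Example invocation (the script filename is illustrative; the positional and
# optional arguments match the parser above):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn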
| 80 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = 'facebook/nllb-200-distilled-600M'
UpperCAmelCase__ : Union[str, Any] = (
        'This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate, and `tgt_lang`, '
        'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
UpperCAmelCase__ : Dict = 'translator'
UpperCAmelCase__ : str = AutoTokenizer
    UpperCAmelCase__ : Optional[Any] = AutoModelForSeq2SeqLM
UpperCAmelCase__ : str = LANGUAGE_CODES
UpperCAmelCase__ : Optional[Any] = ['text', 'text', 'text']
UpperCAmelCase__ : Dict = ['text']
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] ):
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
__lowerCamelCase = self.lang_to_code[src_lang]
__lowerCamelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase_ , return_tensors="""pt""" , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[Any] ):
return self.model.generate(**UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase_ )
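# Hedged usage sketch for the tool above (the concrete class name and instantiation
# details are assumptions, not shown in this file):
#
#   tool = TranslationTool()
#   print(tool("How are you?", src_lang="English", tgt_lang="French"))
#
# `encode` resolves the plain-English names through LANGUAGE_CODES and builds NLLB
# translation inputs, `forward` calls `model.generate`, and `decode` strips the special
# tokens from the generated ids.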
| 715 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
        # clean up as much of the GPU memory occupied by PyTorch as possible
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : int = ViTImageProcessor if is_vision_available() else None
@property
def lowerCAmelCase__ ( self: List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = (3, 32, 1_28)
__lowerCamelCase = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
__lowerCamelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
__lowerCamelCase = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: int ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , **UpperCamelCase_: List[str] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: Tuple ):
        __lowerCamelCase = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )
__lowerCamelCase = Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) )
return image_input
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowerCamelCase = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
__lowerCamelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """test"""
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """test"""
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.char_decode(UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ )
__lowerCamelCase = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = None
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = MgpstrProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = torch.randn(1 , 27 , 38 )
__lowerCamelCase = torch.randn(1 , 27 , 5_02_57 )
__lowerCamelCase = torch.randn(1 , 27 , 3_05_22 )
__lowerCamelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 716 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
    '''Return the sum of all primes below n (Project Euler problem 10) using a sieve of Eratosthenes.'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
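# Quick check (illustrative; `solution` is the intended name of the function above, as
# the __main__ block below suggests): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) == 17.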
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
from pathlib import Path
import fire
def lowerCamelCase__ ( A__ : str , A__ : str , A__ : int ):
    '''Copy the first n lines of every file in src_dir into a file of the same name in dest_dir.'''
__lowerCamelCase = Path(A__ )
__lowerCamelCase = Path(A__ )
dest_dir.mkdir(exist_ok=A__ )
for path in src_dir.iterdir():
__lowerCamelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__lowerCamelCase = dest_dir.joinpath(path.name )
print(A__ )
dest_path.open("""w""" ).write("""\n""".join(A__ ) )
if __name__ == "__main__":
fire.Fire(minify)
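# Example invocation (hypothetical script name and paths; `fire` exposes the function's
# positional arguments on the command line):
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_mini 100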
| 717 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            __lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
        __lowerCamelCase = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
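# The multistep branch of `step` above applies linear-multistep (Adams-Bashforth style)
# coefficients to the history of residuals `ets` (see formulas (9), (12), (13) and
# Algorithm 2 of the paper linked above):
#   1 entry  : e_t
#   2 entries: (3 * e_t - e_{t-1}) / 2
#   3 entries: (23 * e_t - 16 * e_{t-1} + 5 * e_{t-2}) / 12
#   4 entries: (55 * e_t - 59 * e_{t-1} + 37 * e_{t-2} - 9 * e_{t-3}) / 24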
| 80 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=False , UpperCamelCase_: Optional[Any]=10 , UpperCamelCase_: str=3 , UpperCamelCase_: Optional[int]=32 * 4 , UpperCamelCase_: Optional[Any]=32 * 6 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: int=32 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = mask_feature_size
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self: Dict ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Tuple=False ):
with torch.no_grad():
__lowerCamelCase = MaskFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ):
__lowerCamelCase = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(UpperCamelCase_: Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
__lowerCamelCase = model(
pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = MaskFormerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: str ):
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowerCAmelCase__ ( self: int ):
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowerCAmelCase__ ( self: List[str] ):
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase__ ( self: Optional[int] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: Any ):
pass
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@slow
def lowerCAmelCase__ ( self: Optional[int] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
__lowerCamelCase = MaskFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCamelCase_ ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(),
}
__lowerCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
__lowerCamelCase = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self: Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__lowerCamelCase = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
loss.backward()
def lowerCAmelCase__ ( self: List[str] ):
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__lowerCamelCase = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase_ = 1E-4
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowerCamelCase__( unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: str ):
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCamelCase_ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__lowerCamelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__lowerCamelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
__lowerCamelCase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__lowerCamelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCamelCase_ )
.eval()
)
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="""pt""" , )
__lowerCamelCase = inputs["""pixel_values"""].to(UpperCamelCase_ )
__lowerCamelCase = [el.to(UpperCamelCase_ ) for el in inputs["""mask_labels"""]]
__lowerCamelCase = [el.to(UpperCamelCase_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 718 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
    '''Yield the paths of all .py and .ipynb files under top_dir, skipping the scripts directory, hidden or private directories, and __init__.py files.'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
    '''Return the markdown prefix for depth i: an indented "*" bullet, or a "##" heading at the top level.'''
    return f'{i * "  "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
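# Illustrative output for a repository containing maths/prime_check.py (assumed layout):
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)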
if __name__ == "__main__":
print_directory_md('.')
| 80 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def lowerCamelCase__ ( A__ : str , A__ : float | Decimal , A__ : float = 10**-10 ):
    '''Find a root of the function given as a string expression, starting from a, via the Newton-Raphson method.'''
__lowerCamelCase = a
while True:
__lowerCamelCase = Decimal(A__ ) - (
Decimal(eval(A__ ) ) / Decimal(eval(str(diff(A__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(A__ ) ) < precision: # noqa: S307
return float(A__ )
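# The loop above implements the classic update rule x_{n+1} = x_n - f(x_n) / f'(x_n),
# with sympy.diff supplying f' symbolically and Decimal keeping extra precision in the
# division; iteration stops once |f(x)| falls below `precision`.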
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(f"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
    # Find value of e
print(f"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(f"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 719 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
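# Example (illustrative): the mean of [3, 6, 9, 12, 15, 18, 21] is 84 / 7 = 12.0.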
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = len(A__ ) + 1
__lowerCamelCase = len(A__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__lowerCamelCase = [[0 for i in range(A__ )] for j in range(A__ )]
# since string of zero length match pattern of zero length
__lowerCamelCase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , A__ ):
__lowerCamelCase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , A__ ):
__lowerCamelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , A__ ):
for j in range(1 , A__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__lowerCamelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__lowerCamelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__lowerCamelCase = dp[i - 1][j]
else:
__lowerCamelCase = 0
else:
__lowerCamelCase = 0
return bool(dp[-1][-1] )
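# Worked example (matches the __main__ block below): for input_string = "aab" and
# pattern = "c*a*b", "c*" consumes zero 'c's, "a*" consumes "aa" and "b" matches "b",
# so the function returns True.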
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
UpperCAmelCase_ = 'aab'
UpperCAmelCase_ = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
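# Worked example of the derived attributes above, using the defaults: embed_dim=96 and
# depths=[2, 2, 6, 2] give 4 stages, so hidden_size = 96 * 2 ** 3 = 768 and
# stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"].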
| 80 | 0 |
from __future__ import annotations
import math
class lowerCamelCase__:
def __init__( self: Optional[Any] , UpperCamelCase_: int ):
__lowerCamelCase = size
# approximate the overall size of segment tree with given value
__lowerCamelCase = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__lowerCamelCase = [0 for i in range(0 , 4 * size )]
__lowerCamelCase = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int ):
return idx * 2
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int ):
return idx * 2 + 1
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: list[int] ):
if left_element == right_element:
__lowerCamelCase = a[left_element - 1]
else:
__lowerCamelCase = (left_element + right_element) // 2
self.build(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.build(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = max(
self.segment_tree[self.left(UpperCamelCase_ )] , self.segment_tree[self.right(UpperCamelCase_ )] )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.flag[idx] is True:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = False
if left_element != right_element:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = True
__lowerCamelCase = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__lowerCamelCase = val
if left_element != right_element:
__lowerCamelCase = val
__lowerCamelCase = val
__lowerCamelCase = True
__lowerCamelCase = True
return True
__lowerCamelCase = (left_element + right_element) // 2
self.update(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.update(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = max(
self.segment_tree[self.left(UpperCamelCase_ )] , self.segment_tree[self.right(UpperCamelCase_ )] )
return True
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: int ):
if self.flag[idx] is True:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = False
if left_element != right_element:
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = self.lazy[idx]
__lowerCamelCase = True
__lowerCamelCase = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__lowerCamelCase = (left_element + right_element) // 2
__lowerCamelCase = self.query(self.left(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.query(self.right(UpperCamelCase_ ) , mid + 1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return max(UpperCamelCase_ , UpperCamelCase_ )
def __str__( self: Optional[int] ):
return str([self.query(1 , 1 , self.size , UpperCamelCase_ , UpperCamelCase_ ) for i in range(1 , self.size + 1 )] )
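# The class above is a max segment tree with lazy propagation: range assignments are
# buffered in `lazy`/`flag` and pushed down on the next visit, keeping both `update`
# and `query` at O(log n) per call. For instance, after the update(1, 1, size, 1, 3, 111)
# call below, query(1, 1, size, 1, 15) returns 111.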
if __name__ == "__main__":
UpperCAmelCase_ = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
UpperCAmelCase_ = 15
UpperCAmelCase_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 721 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for
    `direction` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`; `length` must be a power of two."""
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
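# Note: the bitonic network only sorts correctly when the number of elements is
# a power of two. A quick sketch with a valid input (not in the original file):
# nums = [10, 30, 11, 20, 4, 330, 21, 110]   # 8 == 2**3 elements
# bitonic_sort(nums, 0, len(nums), 1)
# assert nums == sorted(nums)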
| 80 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # A second lock on the same file must time out while the first one is held.
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    # The lock file name is shortened so it never exceeds the OS limit of 255 chars,
    # yet the lock still behaves like a lock on the original (too long) path.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
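# Minimal usage sketch (assumes the same FileLock API exercised by the tests
# above; not part of the test file):
# lock = FileLock("resource.lock")
# with lock.acquire(timeout=5):   # raises Timeout if not acquired within 5s
#     ...  # exclusive access to the shared resource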
| 700 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
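# Usage sketch (not part of the original file):
# config = NezhaConfig()
# config.vocab_size   # -> 21128, the default defined above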
| 80 | 0 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = VideoMAEConfig()
set_architecture_configs(A__ , A__ )
if "finetuned" not in model_name:
__lowerCamelCase = False
if "finetuned" in model_name:
__lowerCamelCase = """huggingface/label-files"""
if "kinetics" in model_name:
__lowerCamelCase = 400
__lowerCamelCase = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__lowerCamelCase = 174
__lowerCamelCase = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__lowerCamelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Optional[Any] ):
'''simple docstring'''
if "small" in model_name:
__lowerCamelCase = 384
__lowerCamelCase = 1536
__lowerCamelCase = 12
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 3
__lowerCamelCase = 192
__lowerCamelCase = 768
elif "large" in model_name:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 512
__lowerCamelCase = 2048
elif "huge" in model_name:
__lowerCamelCase = 1280
__lowerCamelCase = 5120
__lowerCamelCase = 32
__lowerCamelCase = 16
__lowerCamelCase = 12
__lowerCamelCase = 8
__lowerCamelCase = 640
__lowerCamelCase = 2560
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
if "encoder." in name:
__lowerCamelCase = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__lowerCamelCase = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__lowerCamelCase = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__lowerCamelCase = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__lowerCamelCase = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__lowerCamelCase = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__lowerCamelCase = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__lowerCamelCase = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__lowerCamelCase = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__lowerCamelCase = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__lowerCamelCase = name.replace("""head""" , """classifier""" )
return name
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(A__ )
if key.startswith("""encoder.""" ):
__lowerCamelCase = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__lowerCamelCase = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__lowerCamelCase = config.decoder_hidden_size
__lowerCamelCase = int(key_split[2] )
__lowerCamelCase = """decoder.decoder_layers."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = config.hidden_size
__lowerCamelCase = int(key_split[1] )
__lowerCamelCase = """videomae.encoder.layer."""
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
else:
__lowerCamelCase = val
return orig_state_dict
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__lowerCamelCase = np.load(A__ )
return list(A__ )
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Tuple , A__ : Union[str, Any] , A__ : int ):
'''simple docstring'''
__lowerCamelCase = get_videomae_config(A__ )
if "finetuned" in model_name:
__lowerCamelCase = VideoMAEForVideoClassification(A__ )
else:
__lowerCamelCase = VideoMAEForPreTraining(A__ )
# download original checkpoint, hosted on Google Drive
__lowerCamelCase = """pytorch_model.bin"""
gdown.cached_download(A__ , A__ , quiet=A__ )
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
if "model" in files:
__lowerCamelCase = files["""model"""]
else:
__lowerCamelCase = files["""module"""]
__lowerCamelCase = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
model.eval()
# verify model on basic input
__lowerCamelCase = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__lowerCamelCase = prepare_video()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
__lowerCamelCase = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__lowerCamelCase = torch.load(A__ )
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits
__lowerCamelCase = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
__lowerCamelCase = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
__lowerCamelCase = torch.Size([1, 400] )
__lowerCamelCase = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
__lowerCamelCase = torch.Size([1, 1408, 1536] )
__lowerCamelCase = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
__lowerCamelCase = torch.Size([1, 174] )
__lowerCamelCase = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , A__ , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , A__ , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__lowerCamelCase = outputs.loss
assert torch.allclose(A__ , A__ , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
model.save_pretrained(A__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(A__ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase_ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 701 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None ):
    if not conversation_id:
        conversation_id = uuid.uuid4()
    if past_user_inputs is None:
        past_user_inputs = []
    if generated_responses is None:
        generated_responses = []
    self.uuid = conversation_id
    self.past_user_inputs = past_user_inputs
    self.generated_responses = generated_responses
    self.new_user_input = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def add_user_input( self, text: str, overwrite: bool = False ):
    if self.new_user_input:
        if overwrite:
            logger.warning(
                F'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                F'with: "{text}".' )
            self.new_user_input = text
        else:
            logger.warning(
                F'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
    else:
        self.new_user_input = text
def mark_processed( self ):
    if self.new_user_input:
        self.past_user_inputs.append(self.new_user_input )
    self.new_user_input = None
def append_response( self, response: str ):
    self.generated_responses.append(response )
def iter_texts( self ):
    for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
        yield True, user_input
        yield False, generated_response
    if self.new_user_input:
        yield True, self.new_user_input
def __repr__( self ):
    output = F'Conversation id: {self.uuid} \n'
    for is_user, text in self.iter_texts():
        name = """user""" if is_user else """bot"""
        output += F'{name} >> {text} \n'
    return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 0 |
def equation(x: float) -> float:
    """The function whose root we search for: f(x) = 10 - x**2."""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Find a root of `equation` in [a, b] to within 0.01 by bisection."""
    # Bolzano's theorem: a sign change over [a, b] guarantees a root inside it.
    if equation(a) * equation(b) >= 0:
        raise ValueError("""Wrong space!""")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
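# For reference (not in the original file): both demo calls bracket the positive
# root of 10 - x**2, so each prints a value within the 0.01 stopping tolerance
# of sqrt(10) ≈ 3.1623.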
| 702 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes <= n using one sqrt(n)-sized segment at a time."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    # Classic sieve over the first sqrt(n) numbers.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    # Sieve the remaining range segment by segment.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
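# Quick sanity check (a sketch, not in the original file):
# assert sieve(100) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
#                       53, 59, 61, 67, 71, 73, 79, 83, 89, 97]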
| 80 | 0 |
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
    print(f"""{solution() = }""")
| 703 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 704 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur over neighbours; a neighbour already on the recursion stack is a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
    from doctest import testmod
    testmod()
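# Example usage (not in the original file; `check_cycle` as reconstructed above):
# check_cycle({0: [1], 1: [2], 2: [0]})  # -> True, back edge 2 -> 0
# check_cycle({0: [1], 1: [2], 2: []})   # -> False, the graph is a DAG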
| 80 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Dict ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=None ):
__lowerCamelCase = {}
__lowerCamelCase = {}
if prompt is not None:
__lowerCamelCase = prompt
if generate_kwargs is not None:
__lowerCamelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__lowerCamelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__lowerCamelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self: Optional[Any] , UpperCamelCase_: Union[str, List[str], "Image.Image", List["Image.Image"]] , **UpperCamelCase_: Union[str, Any] ):
return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=None ):
__lowerCamelCase = load_image(UpperCamelCase_ )
if prompt is not None:
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
F'Received an invalid text input, got - {type(UpperCamelCase_ )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
__lowerCamelCase = self.model.config.model_type
if model_type == "git":
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.tokenizer(text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ).input_ids
__lowerCamelCase = [self.tokenizer.cls_token_id] + input_ids
__lowerCamelCase = torch.tensor(UpperCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , header_text=UpperCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
__lowerCamelCase = self.tokenizer(UpperCamelCase_ , return_tensors=self.framework )
model_inputs.update(UpperCamelCase_ )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__lowerCamelCase = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__lowerCamelCase = None
return model_inputs
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any]=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , UpperCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__lowerCamelCase = None
if generate_kwargs is None:
__lowerCamelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__lowerCamelCase = model_inputs.pop(self.model.main_input_name )
__lowerCamelCase = self.model.generate(UpperCamelCase_ , **UpperCamelCase_ , **UpperCamelCase_ )
return model_outputs
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Dict ):
__lowerCamelCase = []
for output_ids in model_outputs:
__lowerCamelCase = {
"""generated_text""": self.tokenizer.decode(
UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , )
}
records.append(UpperCamelCase_ )
return records
| 705 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of both arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 80 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 706 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def test_activations_are_distinct_objects( self ):
    act1 = get_activation("""gelu""" )
    act1.a = 1
    act2 = get_activation("""gelu""" )
    self.assertEqual(act1.a , 1 )
    # `get_activation` returns a fresh instance each call, so the attribute set
    # on `act1` must not exist on `act2`.
    with self.assertRaises(AttributeError ):
        _ = act2.a
| 80 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("""Invalid inputs. Enter positive value.""")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    from doctest import testmod
    testmod()
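# Worked example (not in the original file): 2 mol at 100 K in 5 m^3 gives
# P = nRT / V = 2 * 100 * 8.314462 / 5 ≈ 332.58 Pa.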
| 707 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
| 80 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the last `num_runs` workflow runs of the scheduled (daily) CI on `main`."""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = """636036"""
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["""id"""]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword expected by `get_artifacts_links`.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts and return their file contents, keyed by artifact."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("""UTF-8""")
    return results
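# Usage sketch (hypothetical artifact name and token source; not in the original file):
# reports = get_last_daily_ci_reports(["ci_results"], output_dir=".", token=os.environ.get("GITHUB_TOKEN"))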
| 708 |
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
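# Example usage (a sketch; `Graph` and `count_islands` as reconstructed above):
# Graph(5, 5, [
#     [1, 1, 0, 0, 0],
#     [0, 1, 0, 0, 1],
#     [1, 0, 0, 1, 1],
#     [0, 0, 0, 0, 0],
#     [1, 0, 1, 0, 1],
# ]).count_islands()  # -> 5 with 8-directional connectivity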
| 80 | 0 |
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` can be placed at (row, column) without repeating in its
    row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return the grid, or None on failure."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=""" """)
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print('\nExample grid:\n' + '=' * 20)
        print_solution(example_grid)
        print('\nExample grid solution:')
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('Cannot find a solution.')
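# Note (not in the original file): `if location := find_empty_location(grid)` uses
# an assignment expression, so this file requires Python >= 3.8; when the grid is
# full the function returns None and the solved grid propagates back up the recursion.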
| 709 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
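        # Illustration of the flip (derived from abs(layer_idx - 4), not from a
        # real checkpoint): refinenet1 -> fusion_stage.layers.3, refinenet2 ->
        # fusion_stage.layers.2, refinenet3 -> fusion_stage.layers.1 and
        # refinenet4 -> fusion_stage.layers.0.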
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 80 | 0 |
import os
# Precomputes a list of the first 100 triangular numbers
UpperCAmelCase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = os.path.dirname(os.path.realpath(A__ ) )
__lowerCamelCase = os.path.join(A__ , """words.txt""" )
__lowerCamelCase = """"""
with open(A__ ) as f:
__lowerCamelCase = f.readline()
__lowerCamelCase = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
__lowerCamelCase = [
word
        for word in [sum(ord(x) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(A__ )
if __name__ == "__main__":
print(solution())
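    # A hedged worked example of the scoring rule (assuming the standard
    # A=1..Z=26 mapping used above): "SKY" -> 19 + 11 + 25 = 55 = t_10, so it
    # counts as a triangle word.
    assert sum(ord(ch) - 64 for ch in "SKY") in TRIANGULAR_NUMBERS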
| 710 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 80 | 0 |
UpperCAmelCase_ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
__lowerCamelCase = Stack()
__lowerCamelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(A__ ) )
elif i in operators:
# RULE 2
operator_stack.push(A__ )
elif i == ")":
# RULE 4
__lowerCamelCase = operator_stack.peek()
operator_stack.pop()
__lowerCamelCase = operand_stack.peek()
operand_stack.pop()
__lowerCamelCase = operand_stack.peek()
operand_stack.pop()
__lowerCamelCase = operators[opr](A__ , A__ )
operand_stack.push(A__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 711 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCamelCase__( __lowerCamelCase):
@property
def lowerCAmelCase__ ( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
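# For the default (non multiple-choice) task, the property above resolves to
# the following mapping (a restatement of the code, not an addition):
#
#     OrderedDict([
#         ("input_ids", {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#         ("token_type_ids", {0: "batch", 1: "sequence"}),
#     ])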
| 80 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
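# A minimal denoising-loop sketch for the scheduler above (assumptions: the
# class is exported under a readable name such as `IPNDMScheduler`, and `unet`
# is any callable mapping (sample, t) to a tensor of the same shape; this is
# not the canonical diffusers example):
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         residual = unet(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample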
| 712 |
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0]
__lowerCamelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__lowerCamelCase = 0
# the area corresponding to the grid that gives the product closest to target
__lowerCamelCase = 0
# an estimate of b, using the quadratic formula
__lowerCamelCase = 42
# the largest integer less than b_estimate
__lowerCamelCase = 42
    # the smallest integer greater than b_estimate
__lowerCamelCase = 42
# the triangle number corresponding to b_floor
__lowerCamelCase = 42
# the triangle number corresponding to b_ceil
__lowerCamelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowerCamelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowerCamelCase = floor(A__ )
__lowerCamelCase = ceil(A__ )
__lowerCamelCase = triangle_numbers[b_floor]
__lowerCamelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_first_guess * triangle_a
__lowerCamelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowerCamelCase = triangle_b_second_guess * triangle_a
__lowerCamelCase = idx_a * b_ceil
return area
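# Derivation of the estimate used in the loop above (a restatement, not new
# math): we want t_a * t_b ~= target with t_b = b * (b + 1) / 2, so
# b**2 + b - 2 * target / t_a ~= 0, whose positive root is
# b = (-1 + sqrt(1 + 8 * target / t_a)) / 2, exactly the expression assigned
# to the b estimate.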
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
__lowerCamelCase = sorted(string.lower() )
return len(A__ ) == len(set(A__ ) )
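# Quick hedged examples (hand-checked, not from the source): "Uncopyrightable"
# has no repeated letters, so the check returns True; "allowed" repeats "l",
# so it returns False.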
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter a string ').strip()
UpperCAmelCase_ = is_isogram(input_str)
print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 713 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
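# A note on the returned tuple (inferred from the loop above): each
# resnet/attention pair appends one hidden state to `output_states`, plus one
# more after the optional downsample; these entries become the skip
# connections consumed by the matching up block further below.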
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase__( unittest.TestCase):
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.dummy_uncond_unet
__lowerCamelCase = PNDMScheduler()
__lowerCamelCase = PNDMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
pndm.to(UpperCamelCase_ )
pndm.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pndm(generator=UpperCamelCase_ , num_inference_steps=20 , output_type="""numpy""" ).images
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pndm(generator=UpperCamelCase_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCamelCase_ )[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = """google/ddpm-cifar10-32"""
__lowerCamelCase = UNetaDModel.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = PNDMScheduler()
__lowerCamelCase = PNDMPipeline(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
pndm.to(UpperCamelCase_ )
pndm.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pndm(generator=UpperCamelCase_ , output_type="""numpy""" ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
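# A hedged invocation sketch (script name and paths are placeholders, not
# taken from the source):
#
#     python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#         bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn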
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80 | 0 |
def lowerCamelCase__ ( A__ : int = 10 , A__ : int = 1000 , A__ : bool = True ):
'''simple docstring'''
assert (
isinstance(A__ , A__ )
and isinstance(A__ , A__ )
and isinstance(A__ , A__ )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
def lowerCamelCase__ ( A__ : int , A__ : int ):
'''simple docstring'''
return int((number_a + number_a) / 2 )
def lowerCamelCase__ ( A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
assert (
isinstance(A__ , A__ ) and isinstance(A__ , A__ ) and isinstance(A__ , A__ )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
    def answer(number : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
__lowerCamelCase = lower
__lowerCamelCase = higher
__lowerCamelCase = []
while True:
__lowerCamelCase = get_avg(A__ , A__ )
last_numbers.append(A__ )
if answer(A__ ) == "low":
__lowerCamelCase = number
elif answer(A__ ) == "high":
__lowerCamelCase = number
else:
break
print(f'guess the number : {last_numbers[-1]}' )
print(f'details : {last_numbers!s}' )
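    # A hand-derived trace of the intended bisection (lower=1, higher=100,
    # to_guess=42): the midpoints go 50 -> high, 25 -> low, 37 -> low,
    # 43 -> high, 40 -> low, 41 -> low and 42 -> same, so the loop stops after
    # seven guesses.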
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = int(input("""Enter lower value : """ ).strip() )
__lowerCamelCase = int(input("""Enter high value : """ ).strip() )
__lowerCamelCase = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(A__ , A__ , A__ )
if __name__ == "__main__":
main()
| 715 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
            __lowerCamelCase,
        ) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
UpperCAmelCase_ = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
UpperCAmelCase_ = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
UpperCAmelCase_ = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
def lowerCamelCase__ ( A__ : int = 2000000 ):
'''simple docstring'''
__lowerCamelCase = [0 for i in range(n + 1 )]
__lowerCamelCase = 1
__lowerCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , A__ ):
__lowerCamelCase = 1
__lowerCamelCase = 0
for i in range(A__ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
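# A hedged sanity check of the sieve (assuming it is exported under a callable
# name such as `solution`): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) should return 17.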
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowerCamelCase__:
UpperCAmelCase__ : float
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
def lowerCamelCase__ ( A__ : TreeNode | None ):
'''simple docstring'''
def is_valid_tree(A__ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(A__ , A__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(A__ ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
A__ : TreeNode | None , A__ : float , A__ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , A__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , A__ )
)
return is_binary_search_tree_recursive_check(A__ , -float("""inf""" ) , float("""inf""" ) )
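# A hedged usage sketch (assuming the validator above is exported under a
# readable name such as `is_binary_search_tree`; the tree literal is invented):
#
#     root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     assert is_binary_search_tree(root)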
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
| 80 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
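# File names are expected to look like "<label>_<index>.jpg" (as in the Oxford-IIIT Pet data
# this example appears to target): strip the directory and the trailing "_<index>.jpg", so
# e.g. "beagle_32.jpg" maps to the label "beagle".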
__lowerCamelCase = fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , A__ ).groups()[0]
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: int , UpperCamelCase_: str , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None ):
__lowerCamelCase = file_names
__lowerCamelCase = image_transform
__lowerCamelCase = label_to_id
def __len__( self: Tuple ):
return len(self.file_names )
def __getitem__( self: Tuple , UpperCamelCase_: Dict ):
__lowerCamelCase = self.file_names[idx]
__lowerCamelCase = PIL.Image.open(UpperCamelCase_ )
__lowerCamelCase = raw_image.convert("""RGB""" )
if self.image_transform is not None:
__lowerCamelCase = self.image_transform(UpperCamelCase_ )
__lowerCamelCase = extract_label(UpperCamelCase_ )
if self.label_to_id is not None:
__lowerCamelCase = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCamelCase__ ( A__ : List[Any] , A__ : List[Any] ):
'''simple docstring'''
if args.with_tracking:
__lowerCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
__lowerCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = config["""image_size"""]
if not isinstance(A__ , (list, tuple) ):
__lowerCamelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
__lowerCamelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__lowerCamelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' )
else:
__lowerCamelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__lowerCamelCase = os.path.split(A__ )[-1].split(""".""" )[0]
accelerator.init_trackers(A__ , A__ )
# Grab all the image filenames
__lowerCamelCase = [os.path.join(args.data_dir , A__ ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
__lowerCamelCase = [extract_label(A__ ) for fname in file_names]
__lowerCamelCase = list(set(A__ ) )
id_to_label.sort()
__lowerCamelCase = {lbl: i for i, lbl in enumerate(A__ )}
# Set the seed before splitting the data.
np.random.seed(A__ )
torch.manual_seed(A__ )
torch.cuda.manual_seed_all(A__ )
# Split our filenames between train and validation
__lowerCamelCase = np.random.permutation(len(A__ ) )
__lowerCamelCase = int(0.8 * len(A__ ) )
__lowerCamelCase = random_perm[:cut]
__lowerCamelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__lowerCamelCase = Compose([RandomResizedCrop(A__ , scale=(0.5, 1.0) ), ToTensor()] )
__lowerCamelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=A__ , label_to_id=A__ )
# For evaluation, we use a deterministic Resize
__lowerCamelCase = Compose([Resize(A__ ), ToTensor()] )
__lowerCamelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=A__ , label_to_id=A__ )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
__lowerCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = create_model("""resnet50d""" , pretrained=A__ , num_classes=len(A__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCamelCase = False
for param in model.get_classifier().parameters():
__lowerCamelCase = True
# We normalize the batches of images to be a bit faster.
__lowerCamelCase = torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
__lowerCamelCase = torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCamelCase = OneCycleLR(optimizer=A__ , max_lr=A__ , epochs=A__ , steps_per_epoch=len(A__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}' )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCamelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCamelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCamelCase = os.path.splitext(A__ )[0]
if "epoch" in training_difference:
__lowerCamelCase = int(training_difference.replace("""epoch_""" , """""" ) ) + 1
__lowerCamelCase = None
else:
__lowerCamelCase = int(training_difference.replace("""step_""" , """""" ) )
__lowerCamelCase = resume_step // len(A__ )
resume_step -= starting_epoch * len(A__ )
# Now we train the model
for epoch in range(A__ , A__ ):
model.train()
if args.with_tracking:
__lowerCamelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCamelCase = accelerator.skip_first_batches(A__ , A__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCamelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCamelCase = (batch["""image"""] - mean) / std
__lowerCamelCase = model(A__ )
__lowerCamelCase = torch.nn.functional.cross_entropy(A__ , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(A__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(A__ , A__ ):
__lowerCamelCase = f'step_{overall_step}'
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
model.eval()
__lowerCamelCase = 0
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCamelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCamelCase = (batch["""image"""] - mean) / std
with torch.no_grad():
__lowerCamelCase = model(A__ )
__lowerCamelCase = outputs.argmax(dim=-1 )
__lowerCamelCase, __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch["""label"""]) )
__lowerCamelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCamelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(A__ ),
"""epoch""": epoch,
} , step=A__ , )
if checkpointing_steps == "epoch":
__lowerCamelCase = f'epoch_{epoch}'
if args.output_dir is not None:
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=A__ , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=A__ , default=A__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=A__ , default=A__ , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=A__ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 3E-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 718 |
import os
from collections.abc import Iterator
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(A__ ):
__lowerCamelCase = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A__ )[1] in (".py", ".ipynb"):
yield os.path.join(A__ , A__ ).lstrip("""./""" )
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return f'{i * " "}*' if i else "\n##"
def lowerCamelCase__ ( A__ : str , A__ : str ):
'''simple docstring'''
__lowerCamelCase = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(A__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowerCamelCase__ ( A__ : str = "." ):
'''simple docstring'''
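# Walk the sorted file paths and emit a Markdown index: each previously unseen directory
# component becomes a new heading/list level, and every file becomes a percent-encoded link.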
__lowerCamelCase = """"""
for filepath in sorted(good_file_paths(A__ ) ):
__lowerCamelCase, __lowerCamelCase = os.path.split(A__ )
if filepath != old_path:
__lowerCamelCase = print_path(A__ , A__ )
__lowerCamelCase = (filepath.count(os.sep ) + 1) if filepath else 0
__lowerCamelCase = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
__lowerCamelCase = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(A__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 80 | 0 |
from math import isqrt
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , A__ , A__ ):
__lowerCamelCase = False
return [i for i in range(2 , A__ ) if is_prime[i]]
def lowerCamelCase__ ( A__ : int = 10**8 ):
'''simple docstring'''
__lowerCamelCase = calculate_prime_numbers(max_number // 2 )
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = len(A__ ) - 1
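# Two-pointer count: for each prime p (at `left`), shrink `right` until p * primes[right]
# falls below the limit; every prime q with left <= index(q) <= right then gives a valid
# semiprime p * q, so the whole window is counted at once.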
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
UpperCAmelCase_ = False
try:
UpperCAmelCase_ = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: str = None , UpperCamelCase_: list = [] ):
__lowerCamelCase = 0
__lowerCamelCase = choices
__lowerCamelCase = prompt
if sys.platform == "win32":
__lowerCamelCase = """*"""
else:
__lowerCamelCase = """➔ """
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCamelCase_ )
else:
forceWrite(self.choices[index] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: int ):
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(UpperCamelCase_ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Direction , UpperCamelCase_: int = 1 ):
__lowerCamelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase_ )
move_cursor(UpperCamelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
def lowerCAmelCase__ ( self: Dict ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def lowerCAmelCase__ ( self: str ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def lowerCAmelCase__ ( self: int ):
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCamelCase_ )] for number in range(10 )] )
def lowerCAmelCase__ ( self: str ):
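# A pressed digit key selects that row directly: convert the key to its index and move the
# cursor the required number of rows up or down.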
__lowerCamelCase = int(chr(self.current_selection ) )
__lowerCamelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCamelCase_ )
else:
return
else:
return
def lowerCAmelCase__ ( self: int , UpperCamelCase_: int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
__lowerCamelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase_ )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
__lowerCamelCase = int(builtins.input() )
except ValueError:
__lowerCamelCase = default_choice
else:
__lowerCamelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(UpperCamelCase_ , """\n""" )
return choice
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = 'sew'
def __init__( self: Union[str, Any] , UpperCamelCase_: int=32 , UpperCamelCase_: Optional[Any]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: Optional[Any]=30_72 , UpperCamelCase_: Dict=2 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: Dict=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: Tuple=1E-5 , UpperCamelCase_: Optional[int]="group" , UpperCamelCase_: str="gelu" , UpperCamelCase_: Optional[int]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_: List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_: Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_: List[str]=False , UpperCamelCase_: Optional[int]=1_28 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=True , UpperCamelCase_: Tuple=0.05 , UpperCamelCase_: List[Any]=10 , UpperCamelCase_: Tuple=2 , UpperCamelCase_: Tuple=0.0 , UpperCamelCase_: List[Any]=10 , UpperCamelCase_: Optional[int]=0 , UpperCamelCase_: Union[str, Any]="mean" , UpperCamelCase_: Dict=False , UpperCamelCase_: str=False , UpperCamelCase_: str=2_56 , UpperCamelCase_: str=0 , UpperCamelCase_: int=1 , UpperCamelCase_: Union[str, Any]=2 , **UpperCamelCase_: str , ):
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = feat_extract_norm
__lowerCamelCase = feat_extract_activation
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = list(UpperCamelCase_ )
__lowerCamelCase = conv_bias
__lowerCamelCase = num_conv_pos_embeddings
__lowerCamelCase = num_conv_pos_embedding_groups
__lowerCamelCase = len(self.conv_dim )
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = intermediate_size
__lowerCamelCase = squeeze_factor
__lowerCamelCase = hidden_act
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = feat_proj_dropout
__lowerCamelCase = final_dropout
__lowerCamelCase = layerdrop
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
# ctc loss
__lowerCamelCase = ctc_loss_reduction
__lowerCamelCase = ctc_zero_infinity
# sequence classification
__lowerCamelCase = use_weighted_layer_sum
__lowerCamelCase = classifier_proj_size
@property
def lowerCAmelCase__ ( self: Optional[int] ):
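# The overall downsampling factor of the convolutional feature extractor is the product of
# all convolution strides.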
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 721 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
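# Compare-and-swap each element with its partner one half-length away, pushing values toward
# the requested direction, then recurse on both halves until single elements remain.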
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['PerceiverFeatureExtractor']
UpperCAmelCase_ = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuid4()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
F'User input added while an unprocessed input already existed: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
F'User input added while an unprocessed input already existed: "{self.new_user_input}"; new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 0 |
from collections.abc import Sequence
from queue import Queue
class lowerCamelCase__:
def __init__( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict=None , UpperCamelCase_: Tuple=None ):
__lowerCamelCase = start
__lowerCamelCase = end
__lowerCamelCase = val
__lowerCamelCase = (start + end) // 2
__lowerCamelCase = left
__lowerCamelCase = right
def __repr__( self: Tuple ):
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class lowerCamelCase__:
def __init__( self: int , UpperCamelCase_: Sequence , UpperCamelCase_: str ):
__lowerCamelCase = collection
__lowerCamelCase = function
if self.collection:
__lowerCamelCase = self._build_tree(0 , len(UpperCamelCase_ ) - 1 )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Dict ):
self._update_tree(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] ):
return self._query_range(self.root , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict ):
if start == end:
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.collection[start] )
__lowerCamelCase = (start + end) // 2
__lowerCamelCase = self._build_tree(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self._build_tree(mid + 1 , UpperCamelCase_ )
return SegmentTreeNode(UpperCamelCase_ , UpperCamelCase_ , self.fn(left.val , right.val ) , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
if node.start == i and node.end == i:
__lowerCamelCase = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
self._update_tree(node.right , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.fn(node.left.val , node.right.val )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Dict ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase_ , UpperCamelCase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
if self.root is not None:
__lowerCamelCase = Queue()
queue.put(self.root )
while not queue.empty():
__lowerCamelCase = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
UpperCAmelCase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 702 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
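# Segmented phase: process the remaining range in blocks of roughly sqrt(n) numbers, marking
# composites in each block using only the base primes found above, so memory stays O(sqrt(n)).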
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
print(sieve(10**6))
| 80 | 0 |
UpperCAmelCase_ = tuple[float, float, float]
UpperCAmelCase_ = tuple[float, float, float]
def lowerCamelCase__ ( A__ : Pointad , A__ : Pointad ):
'''simple docstring'''
__lowerCamelCase = end_pointa[0] - end_pointa[0]
__lowerCamelCase = end_pointa[1] - end_pointa[1]
__lowerCamelCase = end_pointa[2] - end_pointa[2]
return (x, y, z)
def lowerCamelCase__ ( A__ : Vectorad , A__ : Vectorad ):
'''simple docstring'''
__lowerCamelCase = ab[1] * ac[2] - ab[2] * ac[1] # *i
__lowerCamelCase = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__lowerCamelCase = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def lowerCamelCase__ ( A__ : Vectorad , A__ : int ):
'''simple docstring'''
return tuple(round(A__ , A__ ) for x in vector ) == (0, 0, 0)
def lowerCamelCase__ ( A__ : Pointad , A__ : Pointad , A__ : Pointad , A__ : int = 10 ):
'''simple docstring'''
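# Three points are collinear exactly when the cross product of the vectors AB and AC is the
# zero vector (tested here after rounding each component to the given number of places).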
__lowerCamelCase = create_vector(A__ , A__ )
__lowerCamelCase = create_vector(A__ , A__ )
return is_zero_vector(get_ad_vectors_cross(A__ , A__ ) , A__ )
| 703 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 0 |
from bisect import bisect
from itertools import accumulate
def lowerCamelCase__ ( A__ : Any , A__ : str , A__ : List[str] , A__ : Optional[int] ):
'''simple docstring'''
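# Greedy fractional knapsack: sort items by value/weight ratio (descending), take whole items
# while capacity remains (located via bisect on the prefix sums of the weights), then take a
# fractional share of the first item that no longer fits.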
__lowerCamelCase = sorted(zip(A__ , A__ ) , key=lambda A__ : x[0] / x[1] , reverse=A__ )
__lowerCamelCase, __lowerCamelCase = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase = list(accumulate(A__ ) )
__lowerCamelCase = bisect(A__ , A__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
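# A directed graph contains a cycle iff a depth-first search ever reaches a vertex that is
# still on the current recursion stack (a "back edge").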
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 705 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[float] , A__ : list[float] ):
'''simple docstring'''
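# Median via merge-and-sort: concatenate both arrays, sort, and pick the middle element
# (odd total length) or the mean of the two middle elements (even total length).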
__lowerCamelCase = sorted(numsa + numsa )
__lowerCamelCase, __lowerCamelCase = divmod(len(A__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of first array: ').split()]
UpperCAmelCase_ = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 80 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Dict , A__ : Union[str, Any] , A__ : Any , A__ : Dict ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Tuple , A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def main():
    '''Parse CLI arguments and launch the training function.'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" , type=str , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=False , )
    parser.add_argument(
        """--output_dir""" , type=str , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
    parser.add_argument(
        """--resume_from_checkpoint""" , type=str , default=None , help="""If the training should continue from a checkpoint folder.""" , )
    parser.add_argument(
        """--partial_train_epoch""" , type=int , default=None , help="""If passed, the training will stop after this number of epochs.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
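The script above leans entirely on Accelerate's state checkpointing. A minimal sketch of the save/load pairing it exercises; the checkpoint directory name is illustrative:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model, optimizer = accelerator.prepare(model, optimizer)

# save_state captures the model, optimizer, RNG states and any registered schedulers
accelerator.save_state("checkpoints/epoch_0")

# ... later, load_state restores all of them so training resumes exactly where it stopped
accelerator.load_state("checkpoints/epoch_0")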
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError ):
            get_activation("""bogus""" )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def lowerCAmelCase__ ( self: List[Any] ):
        acta = get_activation("""gelu""" )
        acta.a = 1
        actb = get_activation("""gelu""" )
        self.assertEqual(acta.a , 1 )
        # a fresh instance must not carry attributes set on another instance
        with self.assertRaises(AttributeError ):
            _ = actb.a
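For reference, a small sketch of what the gelu_10 assertions above verify: the clipped variant tracks gelu below the cap and saturates at 10.0 (assumes transformers is installed):

import torch
from transformers.activations import get_activation

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
gelu = get_activation("gelu")
gelu_10 = get_activation("gelu_10")

print(gelu(x))     # unbounded above
print(gelu_10(x))  # identical below the cap, but never exceeds 10.0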
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : Dict = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : str = frozenset([])
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self: Any , device: Optional[Any] , seed: List[Any]=0 ):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
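Outside a test harness, the same checkpoint is driven like this; a sketch reusing the image and mask URLs from the slow tests above:

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)

image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpainted.png")  # output filename is illustrative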
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__( __lowerCamelCase):
@slow
@require_torch
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 1_28
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
__lowerCamelCase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase_: List[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=5_12 )
__lowerCamelCase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCamelCase_ , max_length=1_28 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-1_00 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__lowerCamelCase = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 5_12 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 1_28 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase_: int ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy="""steps""" , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
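The -100 substitution in the label mapping above works because PyTorch's cross-entropy loss skips that index by default; a minimal demonstration:

import torch
from torch.nn import CrossEntropyLoss

loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
logits = torch.randn(4, 10)                # 4 positions over a 10-token vocabulary
labels = torch.tensor([3, 7, -100, -100])  # the last two positions are padding

# Padding positions contribute nothing to the loss or its gradient
print(loss_fct(logits, labels))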
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = 10
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = [1, 2, 3, 4]
__lowerCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowerCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = """"""
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__lowerCamelCase, __lowerCamelCase = process_story(UpperCamelCase_ )
__lowerCamelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = ["""It was the best of times."""]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowerCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 1_01
__lowerCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowerCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowerCamelCase = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
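One implementation of truncate_or_pad consistent with the three truncation tests above (the actual utils_summarization module may differ in details):

def truncate_or_pad(sequence, block_size, pad_token_id):
    """Clip sequence to block_size tokens, or right-pad it with pad_token_id."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))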
class Graph:  # Public class to implement a graph
    def __init__( self , row: int , col: int , graph: list[list[bool]] ):
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i: int , j: int , visited: list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i: int , j: int , visited: list[list[bool]] ) -> None:
        # Depth-first search checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
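A quick usage check for the island counter, on the classic example grid (islands are 8-connected components of 1s):

if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(5, 5, grid)
    print(g.count_islands())  # 5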
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
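With the lazy structure in place, end users import as usual and the heavy tokenizer modules load only on first access; the checkpoint name below is illustrative:

from transformers import NllbTokenizer  # resolved lazily via _LazyModule

tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
print(tokenizer.tokenize("Hello world"))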
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( A__ : str ):
'''simple docstring'''
__lowerCamelCase = DPTConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 1024
__lowerCamelCase = 4096
__lowerCamelCase = 24
__lowerCamelCase = 16
__lowerCamelCase = [5, 11, 17, 23]
__lowerCamelCase = [256, 512, 1024, 1024]
__lowerCamelCase = (1, 384, 384)
if "ade" in checkpoint_url:
__lowerCamelCase = True
__lowerCamelCase = 150
__lowerCamelCase = """huggingface/label-files"""
__lowerCamelCase = """ade20k-id2label.json"""
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCamelCase = {int(A__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Dict ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__lowerCamelCase = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__lowerCamelCase = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__lowerCamelCase = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__lowerCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__lowerCamelCase = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__lowerCamelCase = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__lowerCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__lowerCamelCase = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__lowerCamelCase = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__lowerCamelCase = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__lowerCamelCase = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__lowerCamelCase = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__lowerCamelCase = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__lowerCamelCase = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCamelCase = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__lowerCamelCase = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__lowerCamelCase = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__lowerCamelCase = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__lowerCamelCase = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__lowerCamelCase = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__lowerCamelCase = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__lowerCamelCase = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__lowerCamelCase = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__lowerCamelCase = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__lowerCamelCase = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__lowerCamelCase = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def lowerCamelCase__ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__lowerCamelCase = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase = in_proj_bias[: config.hidden_size]
__lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = get_dpt_config(A__ )
# load original state_dict from URL
__lowerCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(A__ )
# rename keys
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(A__ )
__lowerCamelCase = val
# read in qkv matrices
read_in_q_k_v(A__ , A__ )
# load HuggingFace model
__lowerCamelCase = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ )
model.load_state_dict(A__ )
model.eval()
# Check outputs on an image
__lowerCamelCase = 480 if """ade""" in checkpoint_url else 384
__lowerCamelCase = DPTImageProcessor(size=A__ )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(A__ , return_tensors="""pt""" )
# forward pass
__lowerCamelCase = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth
# Assert logits
__lowerCamelCase = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
__lowerCamelCase = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(A__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , A__ )
)
Path(A__ ).mkdir(exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(A__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=A__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(A__ , A__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=A__ , )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
UpperCAmelCase_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
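Once converted (or using the already-published Intel/dpt-large checkpoint on the Hub), the model loads like any other transformers model; a hedged sketch of depth estimation with it:

import requests
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth
print(predicted_depth.shape)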
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ['image', 'text']
    outputs = ['text']
    def __init__( self: List[Any] , *args: Any , **kwargs: List[Any] ):
        if not is_vision_available():
            raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
        super().__init__(*args , **kwargs )
    def encode( self: Tuple , document: "Image" , question: str ):
        task_prompt = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        prompt = task_prompt.replace("""{user_input}""" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="""pt""" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="""pt""" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self: Dict , inputs: Union[str, Any] ):
        return self.model.generate(
            inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self: Any , outputs: Dict ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
        sequence = re.sub(r"""<.*?>""" , """""" , sequence , count=1 ).strip() # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
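A usage sketch for the tool; the document path and the question are placeholders:

from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")  # hypothetical input file
print(tool(document, "What is the purchase amount?"))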
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[str]=13 , UpperCamelCase_: List[Any]=64 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Tuple=True , UpperCamelCase_: int=True , UpperCamelCase_: List[str]=32 , UpperCamelCase_: Dict=5 , UpperCamelCase_: List[Any]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: str="gelu" , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Tuple=10 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: List[str]=[1, 16, 4, 4] , UpperCamelCase_: Optional[int]=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__lowerCamelCase = (self.image_size // 32) ** 2
__lowerCamelCase = num_patches + 1
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: str ):
__lowerCamelCase = ViTHybridModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Tuple ):
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = ViTHybridForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : List[str] = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Tuple = False
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = ViTHybridModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCAmelCase__ ( self: List[str] ):
pass
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(UpperCamelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__lowerCamelCase = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def lowerCAmelCase__ ( self: Tuple ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = ViTHybridModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__( unittest.TestCase):
@cached_property
def lowerCAmelCase__ ( self: str ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase_ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**UpperCamelCase_ )
# verify the logits
__lowerCamelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__lowerCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
__lowerCamelCase = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" )
__lowerCamelCase = model(**UpperCamelCase_ )
__lowerCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
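The same checkpoint outside the test harness; a minimal classification sketch (the image path is a placeholder):

import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")

image = Image.open("cats.png")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])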
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = 'bert'
def __init__( self: List[str] , UpperCamelCase_: str=3_05_22 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[Any]=0.1 , UpperCamelCase_: Optional[int]=5_12 , UpperCamelCase_: List[Any]=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: List[str]=1E-12 , UpperCamelCase_: Dict=0 , UpperCamelCase_: List[Any]="absolute" , UpperCamelCase_: Tuple=True , UpperCamelCase_: Tuple=None , **UpperCamelCase_: Optional[Any] , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs( self: Any ):
if self.task == "multiple-choice":
__lowerCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
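Instantiating the configuration directly; the reduced sizes below are illustrative, not canonical:

from transformers import BertConfig, BertModel

config = BertConfig(
    vocab_size=30522,
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=256,
)
model = BertModel(config)
print(sum(p.numel() for p in model.parameters()))  # far smaller than bert-base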
def solution( n: int = 2000000 ) -> int:
    '''Return the sum of all prime numbers below n, via a sieve of Eratosthenes.'''
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
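A sanity check against a hand-computed value (the primes below ten are 2, 3, 5 and 7):

assert solution(10) == 17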
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000 ) -> int:
    '''Find the area of the grid whose number of contained rectangles is closest to target.'''
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f"""{solution() = }""")
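The search works because an a-by-b grid contains T(a) * T(b) rectangles, where T(n) = n(n+1)/2 is the n-th triangle number; Project Euler's worked example confirms it:

def rectangle_count(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)


assert rectangle_count(3, 2) == 18  # the 3x2 grid from the problem statement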
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCAmelCase_ = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
UpperCAmelCase_ = json.load(f)
@require_torch
class lowerCamelCase__( unittest.TestCase):
    def get_tokenizer( self: Dict , UpperCamelCase_: List[Any] ):
return FSMTTokenizer.from_pretrained(UpperCamelCase_ )
    def get_model( self: Optional[int] , UpperCamelCase_: int ):
        model = FSMTForConditionalGeneration.from_pretrained(UpperCamelCase_ ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self: int , pair: str , min_bleu_score: float ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
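A minimal translation sketch with one of the checkpoints evaluated above:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great, isn't it?"], return_tensors="pt")
generated = model.generate(**batch, num_beams=8)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))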
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=UpperCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_downsample:
__lowerCamelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int=True ):
__lowerCamelCase = ()
for resnet in self.resnets:
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase = self.downsamplers_a(UpperCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = []
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: List[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = []
for i in range(self.num_layers ):
__lowerCamelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
if self.add_upsample:
__lowerCamelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self: List[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase = res_hidden_states_tuple[-1]
__lowerCamelCase = res_hidden_states_tuple[:-1]
__lowerCamelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
if self.add_upsample:
__lowerCamelCase = self.upsamplers_a(UpperCamelCase_ )
return hidden_states
class lowerCamelCase__( nn.Module):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self: int ):
# there is always at least one resnet
__lowerCamelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase = []
for _ in range(self.num_layers ):
__lowerCamelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCamelCase_ )
__lowerCamelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCamelCase_ )
__lowerCamelCase = resnets
__lowerCamelCase = attentions
def __call__( self: int , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int]=True ):
__lowerCamelCase = self.resnets[0](UpperCamelCase_ , UpperCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase = attn(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
__lowerCamelCase = resnet(UpperCamelCase_ , UpperCamelCase_ , deterministic=UpperCamelCase_ )
return hidden_states
| 80 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
UpperCAmelCase_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
UpperCAmelCase_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def lowerCamelCase__ ( A__ : List[str] , A__ : Optional[int] ):
'''simple docstring'''
with open(A__ , """r""" , encoding="""utf-8""" ) as f:
__lowerCamelCase = json.loads(f.read() )
__lowerCamelCase = collections.OrderedDict()
__lowerCamelCase = collections.OrderedDict()
__lowerCamelCase = collections.OrderedDict()
with open(A__ , """r""" , encoding="""utf-8""" ) as f:
__lowerCamelCase = f.readlines()
__lowerCamelCase = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(A__ ):
__lowerCamelCase = b
__lowerCamelCase = idx
for wd in b:
__lowerCamelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ['input_ids', 'attention_mask']
def __init__( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: str="<|endoftext|>" , UpperCamelCase_: Dict="<|endoftext|>" , UpperCamelCase_: List[str]="<|startoftext|>" , UpperCamelCase_: List[Any]="<|endoftext|>" , UpperCamelCase_: Optional[Any]=False , **UpperCamelCase_: Tuple , ):
super().__init__(
unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , do_clean_text=UpperCamelCase_ , **UpperCamelCase_ , )
if not os.path.isfile(UpperCamelCase_ ):
raise ValueError(
F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(UpperCamelCase_ ):
raise ValueError(
F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__lowerCamelCase = do_clean_text
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = load_vocab_and_emoji(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowerCAmelCase__ ( self: Tuple ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def lowerCAmelCase__ ( self: List[Any] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Dict ):
return self.subword_tokenizer.tokenize(UpperCamelCase_ , clean=self.do_clean_text )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Optional[Any] ):
return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any ):
return self.subword_tokenizer.convert_id_to_token(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = """""".join(UpperCamelCase_ ).strip()
return out_string
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: "Conversation" ):
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
__lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = 0
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__lowerCamelCase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__lowerCamelCase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__lowerCamelCase = token_index
writer.write(""",""".join(UpperCamelCase_ ) + """\n""" )
index += 1
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , UpperCamelCase_ )
return vocab_file, emoji_file
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Dict , UpperCamelCase_: int ):
__lowerCamelCase = vocab # same as swe
__lowerCamelCase = ids_to_tokens # same as bpe
__lowerCamelCase = emoji
__lowerCamelCase = np.max([len(UpperCamelCase_ ) for w in self.vocab.keys()] )
__lowerCamelCase = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__lowerCamelCase = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__lowerCamelCase = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__lowerCamelCase = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCamelCase = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCamelCase = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__lowerCamelCase = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__lowerCamelCase = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__lowerCamelCase = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self: int ):
return len(self.ids_to_tokens )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[Any] ):
__lowerCamelCase = self.content_repattera.sub("""<URL>""" , UpperCamelCase_ )
__lowerCamelCase = self.content_repattera.sub("""<EMAIL>""" , UpperCamelCase_ )
__lowerCamelCase = self.content_repattera.sub("""<TEL>""" , UpperCamelCase_ )
__lowerCamelCase = self.content_repattera.sub("""<DATE>""" , UpperCamelCase_ )
__lowerCamelCase = self.content_repattera.sub("""<DATE>""" , UpperCamelCase_ )
__lowerCamelCase = self.content_repattera.sub("""<PRICE>""" , UpperCamelCase_ )
__lowerCamelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__lowerCamelCase = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: str=False ):
__lowerCamelCase = text.replace(""" """ , """<SP>""" )
__lowerCamelCase = text.replace(""" """ , """<SP>""" )
__lowerCamelCase = text.replace("""\r\n""" , """<BR>""" )
__lowerCamelCase = text.replace("""\n""" , """<BR>""" )
__lowerCamelCase = text.replace("""\r""" , """<BR>""" )
__lowerCamelCase = text.replace("""\t""" , """<TAB>""" )
__lowerCamelCase = text.replace("""—""" , """ー""" )
__lowerCamelCase = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__lowerCamelCase = text.replace(UpperCamelCase_ , UpperCamelCase_ )
if clean:
__lowerCamelCase = self.clean_text(UpperCamelCase_ )
def check_simbol(UpperCamelCase_: int ):
__lowerCamelCase = x.encode()
if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 2:
__lowerCamelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = x.encode()
if len(UpperCamelCase_ ) == 1 and len(UpperCamelCase_ ) == 3:
__lowerCamelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
__lowerCamelCase = 0
__lowerCamelCase = []
while pos < len(UpperCamelCase_ ):
__lowerCamelCase = min(len(UpperCamelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__lowerCamelCase = [] # (token_id, token, pos)
for e in range(UpperCamelCase_ , UpperCamelCase_ , -1 ):
__lowerCamelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(UpperCamelCase_ ) > 2:
__lowerCamelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(UpperCamelCase_ ) > 0:
# the smallest token_id is adopted
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase = sorted(UpperCamelCase_ , key=lambda x : x[0] )[0]
result.append(UpperCamelCase_ )
__lowerCamelCase = e
else:
__lowerCamelCase = pos + 1
__lowerCamelCase = text[pos:end]
if check_simbol(UpperCamelCase_ ):
result.append("""<KIGOU>""" )
elif checkuae(UpperCamelCase_ ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__lowerCamelCase = end
return result
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any]="\n" ):
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCamelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(UpperCamelCase_ )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
words.append(bytearray(UpperCamelCase_ ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCamelCase = """""".join(UpperCamelCase_ )
return text
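# Hypothetical usage sketch (not part of the original module): assumes the
# un-mangled public name GPTNeoXJapaneseTokenizer and network access to the Hub.
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    ids = tokenizer("吾輩は猫である。")["input_ids"]
    print(tokenizer.decode(ids))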
| 714 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(A__ , A__ , bias=A__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
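# Side note (not part of the conversion script): the make_linear_from_emb helper
# above is standard weight tying. A minimal self-contained demonstration:
#
#     import torch
#     from torch import nn
#
#     emb = nn.Embedding(50265, 1024)               # (vocab_size, d_model), BART-large sizes
#     lm_head = nn.Linear(1024, 50265, bias=False)  # weight shape (50265, 1024)
#     lm_head.weight.data = emb.weight.data         # logits = hidden @ emb.weight.T
#     assert lm_head.weight.data_ptr() == emb.weight.data_ptr()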
| 80 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
UpperCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
UpperCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
UpperCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__( datasets.Metric):
def lowerCAmelCase__ ( self: Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str ):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] , UpperCamelCase_: Any=0.9 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Union[str, Any]=0.5 ):
if NLTK_VERSION >= version.Version("""3.6.5""" ):
__lowerCamelCase = [
meteor_score.single_meteor_score(
word_tokenize(UpperCamelCase_ ) , word_tokenize(UpperCamelCase_ ) , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ )
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ )
]
else:
__lowerCamelCase = [
meteor_score.single_meteor_score(UpperCamelCase_ , UpperCamelCase_ , alpha=UpperCamelCase_ , beta=UpperCamelCase_ , gamma=UpperCamelCase_ )
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ )
]
return {"meteor": np.mean(UpperCamelCase_ )}
| 715 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
| 80 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = StableUnCLIPPipeline
UpperCAmelCase__ : List[str] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = 32
__lowerCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__lowerCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , )
torch.manual_seed(0 )
__lowerCamelCase = DDPMScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCamelCase = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ )
__lowerCamelCase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , )
torch.manual_seed(0 )
__lowerCamelCase = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL()
__lowerCamelCase = {
# prior components
"""prior_tokenizer""": prior_tokenizer,
"""prior_text_encoder""": prior_text_encoder,
"""prior""": prior,
"""prior_scheduler""": prior_scheduler,
# image noising components
"""image_normalizer""": image_normalizer,
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder,
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
}
return components
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[int]=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""prior_num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = torch_device == """cpu"""
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" )
__lowerCamelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCamelCase = pipe("""anime turle""" , generator=UpperCamelCase_ , output_type="""np""" )
__lowerCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = pipe(
"""anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 716 |
def solution(n: int = 2000000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 80 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
UpperCAmelCase__ : ClassVar[Features] = Features({'audio': Audio()})
UpperCAmelCase__ : ClassVar[Features] = Features({'labels': ClassLabel})
UpperCAmelCase__ : str = "audio"
UpperCAmelCase__ : str = "labels"
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Tuple ):
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , UpperCamelCase_ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
__lowerCamelCase = copy.deepcopy(self )
__lowerCamelCase = self.label_schema.copy()
__lowerCamelCase = features[self.label_column]
__lowerCamelCase = label_schema
return task_template
@property
def lowerCAmelCase__ ( self: int ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
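# Hypothetical usage sketch, assuming the un-mangled datasets name
# AudioClassification; prepare_for_task casts/renames columns to this schema
# (both APIs are deprecated in recent datasets releases):
#
#     from datasets import load_dataset
#     from datasets.tasks import AudioClassification
#
#     ds = load_dataset("superb", "ks", split="train")
#     ds = ds.prepare_for_task(AudioClassification(audio_column="audio", label_column="label"))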
| 717 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Dict = 1
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(UpperCamelCase_ )
# standard deviation of the initial noise distribution
__lowerCamelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCamelCase = 4
# running values
__lowerCamelCase = []
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCamelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCamelCase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCamelCase = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCamelCase = (1.0 - self.betas**2) ** 0.5
__lowerCamelCase = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCamelCase = timesteps.to(UpperCamelCase_ )
__lowerCamelCase = []
def lowerCAmelCase__ ( self: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowerCamelCase = (self.timesteps == timestep).nonzero().item()
__lowerCamelCase = timestep_index + 1
__lowerCamelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase_ )
if len(self.ets ) == 1:
__lowerCamelCase = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCamelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCamelCase = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCamelCase = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCamelCase = self._get_prev_sample(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , *UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
return sample
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = self.alphas[timestep_index]
__lowerCamelCase = self.betas[timestep_index]
__lowerCamelCase = self.alphas[prev_timestep_index]
__lowerCamelCase = self.betas[prev_timestep_index]
__lowerCamelCase = (sample - sigma * ets) / max(UpperCamelCase_ , 1E-8 )
__lowerCamelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self: List[Any] ):
return self.config.num_train_timesteps
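# Standalone sketch (not part of the scheduler's API) of the linear multistep
# combination applied in the step method to the buffer of past model outputs
# `ets` (newest last); the 4-entry case is the classic 4th-order Adams-Bashforth rule.
def _combine_ets_sketch(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24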
| 80 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: Distribution , UpperCamelCase_: List[Any]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = 1.0 if scale is None else scale
__lowerCamelCase = 0.0 if loc is None else loc
super().__init__(UpperCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCamelCase_ )] )
@property
def lowerCAmelCase__ ( self: Optional[int] ):
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCAmelCase__ ( self: List[str] ):
return self.base_dist.variance * self.scale**2
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return self.variance.sqrt()
class lowerCamelCase__( nn.Module):
def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: Callable[..., Tuple[torch.Tensor]] , **UpperCamelCase_: Optional[Any] ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = args_dim
__lowerCamelCase = nn.ModuleList([nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) for dim in args_dim.values()] )
__lowerCamelCase = domain_map
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = [proj(UpperCamelCase_ ) for proj in self.proj]
return self.domain_map(*UpperCamelCase_ )
class lowerCamelCase__( nn.Module):
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] ):
super().__init__()
__lowerCamelCase = function
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: List[Any] ):
return self.function(UpperCamelCase_ , *UpperCamelCase_ )
class lowerCamelCase__:
UpperCAmelCase__ : type
UpperCAmelCase__ : int
UpperCAmelCase__ : Dict[str, int]
def __init__( self: Optional[Any] , UpperCamelCase_: int = 1 ):
__lowerCamelCase = dim
__lowerCamelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Tuple ):
if self.dim == 1:
return self.distribution_class(*UpperCamelCase_ )
else:
return Independent(self.distribution_class(*UpperCamelCase_ ) , 1 )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None , ):
__lowerCamelCase = self._base_distribution(UpperCamelCase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCamelCase_ , loc=UpperCamelCase_ , scale=UpperCamelCase_ , event_dim=self.event_dim )
@property
def lowerCAmelCase__ ( self: Tuple ):
return () if self.dim == 1 else (self.dim,)
@property
def lowerCAmelCase__ ( self: str ):
return len(self.event_shape )
@property
def lowerCAmelCase__ ( self: Dict ):
return 0.0
def lowerCAmelCase__ ( self: int , UpperCamelCase_: int ):
return ParameterProjection(
in_features=UpperCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCAmelCase__ ( self: Optional[int] , *UpperCamelCase_: torch.Tensor ):
raise NotImplementedError()
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: torch.Tensor ):
return (x + torch.sqrt(torch.square(UpperCamelCase_ ) + 4.0 )) / 2.0
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
UpperCAmelCase__ : type = StudentT
@classmethod
def lowerCAmelCase__ ( cls: Dict , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
__lowerCamelCase = 2.0 + cls.squareplus(UpperCamelCase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"loc": 1, "scale": 1}
UpperCAmelCase__ : type = Normal
@classmethod
def lowerCAmelCase__ ( cls: int , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict[str, int] = {"total_count": 1, "logits": 1}
UpperCAmelCase__ : type = NegativeBinomial
@classmethod
def lowerCAmelCase__ ( cls: Any , UpperCamelCase_: torch.Tensor , UpperCamelCase_: torch.Tensor ):
__lowerCamelCase = cls.squareplus(UpperCamelCase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Tuple ):
__lowerCamelCase, __lowerCamelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCamelCase_ , logits=UpperCamelCase_ )
else:
return Independent(self.distribution_class(total_count=UpperCamelCase_ , logits=UpperCamelCase_ ) , 1 )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[torch.Tensor] = None , UpperCamelCase_: Optional[torch.Tensor] = None ):
__lowerCamelCase, __lowerCamelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
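# Hypothetical end-to-end sketch, assuming the un-mangled transformers names
# (StudentTOutput, get_parameter_projection, distribution); the mangled class
# names above cannot be referenced directly:
#
#     import torch
#     from transformers.time_series_utils import StudentTOutput
#
#     output = StudentTOutput(dim=1)
#     proj = output.get_parameter_projection(in_features=32)  # linear heads for df/loc/scale
#     df, loc, scale = proj(torch.randn(8, 32))               # domain-mapped parameters
#     distr = output.distribution((df, loc, scale))           # a torch StudentT
#     sample = distr.sample()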
| 718 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
    print_directory_md(".")
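# For a tree containing src/utils/math_helpers.py, print_directory_md emits:
#
#     ## Src
#       * Utils
#         * [Math Helpers](src/utils/math_helpers.py)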
| 80 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase_ = logging.getLogger(__name__)
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: Any , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: Dict=None ):
super().__init__(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__lowerCamelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
__lowerCamelCase = str(distributed_port + 1 )
__lowerCamelCase = dist.new_group(ranks=UpperCamelCase_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCAmelCase__ ( self: Tuple ):
return dist.get_rank(group=self.process_group ) == 0
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str]=torch.floataa ):
__lowerCamelCase = torch.empty(UpperCamelCase_ , dtype=UpperCamelCase_ )
dist.scatter(UpperCamelCase_ , src=0 , scatter_list=UpperCamelCase_ , group=self.process_group )
return target_tensor
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__lowerCamelCase = next((addr for addr in addrs if addr.startswith("""e""" )) , UpperCamelCase_ )
return ifname
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int ):
# single GPU training
if not dist.is_initialized():
__lowerCamelCase, __lowerCamelCase = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ )
# distributed training
__lowerCamelCase = dist.get_world_size(group=self.process_group )
# gather logic
__lowerCamelCase = None
if self._is_main():
__lowerCamelCase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCamelCase_ )]
dist.gather(torch.tensor(UpperCamelCase_ ) , dst=0 , gather_list=UpperCamelCase_ , group=self.process_group )
# scatter logic
__lowerCamelCase = question_hidden_states.shape[0]
__lowerCamelCase = []
__lowerCamelCase = []
if self._is_main():
assert len(UpperCamelCase_ ) == world_size
__lowerCamelCase, __lowerCamelCase = self._main_retrieve(torch.cat(UpperCamelCase_ ).numpy() , UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = torch.tensor(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
__lowerCamelCase = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self._scattered(UpperCamelCase_ , [n_queries, n_docs] , target_type=torch.intaa )
__lowerCamelCase = self._scattered(UpperCamelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCamelCase_ )
| 719 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
if not nums:
raise ValueError("""List is empty""" )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase_ ) , torch_builtin(UpperCamelCase_ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase_ ) , gelu_new(UpperCamelCase_ ) ) )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = get_activation("""gelu_10""" )
__lowerCamelCase = torch_builtin(UpperCamelCase_ )
__lowerCamelCase = geluaa(UpperCamelCase_ )
__lowerCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self: str ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase_ ):
get_activation(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = get_activation("""gelu""" )
__lowerCamelCase = 1
__lowerCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = acta.a
| 720 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : Any = 'maskformer-swin'
UpperCAmelCase__ : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self: Any , UpperCamelCase_: Any=2_24 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: Optional[int]=96 , UpperCamelCase_: List[str]=[2, 2, 6, 2] , UpperCamelCase_: Optional[Any]=[3, 6, 12, 24] , UpperCamelCase_: str=7 , UpperCamelCase_: int=4.0 , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[int]=0.0 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: int=False , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Optional[Any]=1E-5 , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = depths
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCamelCase = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) )
__lowerCamelCase = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(UpperCamelCase_ ) + 1 )]
__lowerCamelCase, __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
| 80 | 0 |
from __future__ import annotations
import numpy as np
def lowerCamelCase__ ( A__ : list[float] ):
'''simple docstring'''
return np.maximum(0 , A__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 721 |
from __future__ import annotations
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__lowerCamelCase, __lowerCamelCase = array[indexa], array[indexa]
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
for i in range(A__ , low + middle ):
comp_and_swap(A__ , A__ , i + middle , A__ )
bitonic_merge(A__ , A__ , A__ , A__ )
bitonic_merge(A__ , low + middle , A__ , A__ )
def lowerCamelCase__ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ):
'''simple docstring'''
if length > 1:
__lowerCamelCase = int(length / 2 )
bitonic_sort(A__ , A__ , A__ , 1 )
bitonic_sort(A__ , low + middle , A__ , 0 )
bitonic_merge(A__ , A__ , A__ , A__ )
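# Added usage sketch (not in the original row): this bitonic sort assumes the
# input length is a power of two; direction 1 sorts ascending, 0 descending.
# A minimal sanity check under that assumption:
#     data = [12, 42, -21, 17, 23, 18, 9, -5]   # len(data) == 8 == 2**3
#     bitonic_sort(data, 0, len(data), 1)
#     assert data == sorted(data)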
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 80 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
__lowerCamelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowerCamelCase = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , **UpperCamelCase_: int ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , **UpperCamelCase_: List[Any] ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowerCamelCase = self.get_image_processor(do_normalize=UpperCamelCase_ )
__lowerCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """lower newer"""
__lowerCamelCase = processor(text=UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = tokenizer(UpperCamelCase_ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """lower newer"""
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = ["""cat""", """nasa badge"""]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = [["""cat""", """nasa badge"""], ["""person"""]]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = max([len(UpperCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = ["""cat""", """nasa badge"""]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
__lowerCamelCase = inputs["""input_ids"""]
__lowerCamelCase = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.batch_decode(UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 700 |
from ... import PretrainedConfig
UpperCAmelCase_ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase__ : Dict = 'nezha'
def __init__( self: Dict , UpperCamelCase_: Any=2_11_28 , UpperCamelCase_: Optional[int]=7_68 , UpperCamelCase_: Optional[int]=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: Optional[int]=30_72 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Union[str, Any]=5_12 , UpperCamelCase_: Any=64 , UpperCamelCase_: Dict=2 , UpperCamelCase_: int=0.02 , UpperCamelCase_: Optional[Any]=1E-12 , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Any=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Optional[int]=3 , UpperCamelCase_: str=True , **UpperCamelCase_: Any , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = max_relative_position
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = classifier_dropout
__lowerCamelCase = use_cache
| 80 | 0 |
'''simple docstring'''
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
if collection == []:
return []
# get some information about the collection
__lowerCamelCase = len(A__ )
__lowerCamelCase = max(A__ )
__lowerCamelCase = min(A__ )
# create the counting array
__lowerCamelCase = coll_max + 1 - coll_min
__lowerCamelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 , A__ ):
__lowerCamelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__lowerCamelCase = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , A__ ) ):
__lowerCamelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
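# Added worked example: counting sort is stable and runs in O(n + k) time,
# where k = max(collection) - min(collection) + 1, so it suits dense integer
# ranges. For instance:
#     counting_sort([0, 5, 3, 2, 2])  # -> [0, 2, 2, 3, 5]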
def lowerCamelCase__ ( A__ : Optional[int] ):
'''simple docstring'''
return "".join([chr(A__ ) for i in counting_sort([ord(A__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 701 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Union[str, Any] , UpperCamelCase_: str = None , UpperCamelCase_: uuid.UUID = None , UpperCamelCase_: Dict=None , UpperCamelCase_: Any=None ):
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self: Optional[Any] , UpperCamelCase_: Union[str, Any] ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: str , UpperCamelCase_: bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
                    F'User input added while unprocessed input already existed: "{self.new_user_input}" was overwritten '
F'with: "{text}".' )
__lowerCamelCase = text
else:
logger.warning(
                F'User input added while unprocessed input already existed: "{self.new_user_input}" new input '
F'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
__lowerCamelCase = text
def lowerCAmelCase__ ( self: List[str] ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowerCamelCase = None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
self.generated_responses.append(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: Union[str, Any] ):
__lowerCamelCase = F'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
__lowerCamelCase = """user""" if is_user else """bot"""
output += F'{name} >> {text} \n'
return output
@add_end_docstrings(
__lowerCamelCase , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[str] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: str ):
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int=None , UpperCamelCase_: Any=None , UpperCamelCase_: Union[str, Any]=None , **UpperCamelCase_: int ):
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(UpperCamelCase_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Any , UpperCamelCase_: Union[Conversation, List[Conversation]] , UpperCamelCase_: Optional[int]=0 , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = super().__call__(UpperCamelCase_ , num_workers=UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Conversation , UpperCamelCase_: Optional[Any]=32 ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
                F'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(UpperCamelCase_ )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str=10 , **UpperCamelCase_: List[str] ):
__lowerCamelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowerCamelCase = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs["""attention_mask"""][:, -trim:]
__lowerCamelCase = model_inputs.pop("""conversation""" )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**UpperCamelCase_ , **UpperCamelCase_ )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int=True ):
__lowerCamelCase = model_outputs["""output_ids"""]
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , )
__lowerCamelCase = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(UpperCamelCase_ )
return conversation
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Conversation ):
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 80 | 0 |
from __future__ import annotations
class lowerCamelCase__:
def __init__( self: Optional[int] , UpperCamelCase_: int ):
__lowerCamelCase = data
__lowerCamelCase = None
__lowerCamelCase = None
def lowerCamelCase__ ( A__ : Node | None ): # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCamelCase__ ( A__ : Node | None ):
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowerCamelCase__ ( A__ : Node ):
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
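# Added illustration: a binary tree is "full" when every node has either zero
# or two children. Mirroring the construction in main() below:
#     root = Node(1); root.left = Node(2); root.right = Node(3)
#     is_full_binary_tree(root)   # True: every node has 0 or 2 children
#     root.left.left = Node(4)
#     is_full_binary_tree(root)   # False: Node(2) now has exactly one child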
def lowerCamelCase__ ( ): # Main function for testing.
'''simple docstring'''
__lowerCamelCase = Node(1 )
__lowerCamelCase = Node(2 )
__lowerCamelCase = Node(3 )
__lowerCamelCase = Node(4 )
__lowerCamelCase = Node(5 )
__lowerCamelCase = Node(6 )
__lowerCamelCase = Node(7 )
__lowerCamelCase = Node(8 )
__lowerCamelCase = Node(9 )
print(is_full_binary_tree(A__ ) )
print(depth_of_tree(A__ ) )
print("""Tree is: """ )
display(A__ )
if __name__ == "__main__":
main()
| 702 |
import math
def lowerCamelCase__ ( A__ : int ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = 2
__lowerCamelCase = int(math.sqrt(A__ ) ) # Size of every segment
__lowerCamelCase = [True] * (end + 1)
__lowerCamelCase = []
while start <= end:
if temp[start] is True:
in_prime.append(A__ )
for i in range(start * start , end + 1 , A__ ):
__lowerCamelCase = False
start += 1
prime += in_prime
__lowerCamelCase = end + 1
__lowerCamelCase = min(2 * end , A__ )
while low <= n:
__lowerCamelCase = [True] * (high - low + 1)
for each in in_prime:
__lowerCamelCase = math.floor(low / each ) * each
if t < low:
t += each
for j in range(A__ , high + 1 , A__ ):
__lowerCamelCase = False
for j in range(len(A__ ) ):
if temp[j] is True:
prime.append(j + low )
__lowerCamelCase = high + 1
__lowerCamelCase = min(high + end , A__ )
return prime
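# Added reading note: this is a segmented Sieve of Eratosthenes. It first
# sieves primes up to sqrt(n), then reuses them to mark composites window by
# window of size ~sqrt(n), keeping memory at O(sqrt(n)) instead of O(n).
# Assuming the def above is the `sieve` called below, the call should return
# the 78498 primes below 10**6, the largest being 999983.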
print(sieve(10**6))
| 80 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str]=0 ):
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = """french fries"""
__lowerCamelCase = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
__lowerCamelCase = output.images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = [inputs["""prompt"""]] * 2
__lowerCamelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
__lowerCamelCase = image / 2 + 0.5
__lowerCamelCase = image.permute(0 , 3 , 1 , 2 )
__lowerCamelCase = image.repeat(2 , 1 , 1 , 1 )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__lowerCamelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
__lowerCamelCase = [round(UpperCamelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(UpperCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
__lowerCamelCase = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="""pt""" ) )[0]
__lowerCamelCase = components["""vae"""]
__lowerCamelCase = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__lowerCamelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__lowerCamelCase = pipe(**UpperCamelCase_ )[0]
__lowerCamelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str=0 ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
__lowerCamelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ )
__lowerCamelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCamelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def callback_fn(UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: torch.FloatTensor ) -> None:
__lowerCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
__lowerCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
__lowerCamelCase = latents[0, -3:, -3:, -1]
__lowerCamelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
__lowerCamelCase = False
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self: List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = self.get_inputs()
__lowerCamelCase = pipe(**UpperCamelCase_ )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__lowerCamelCase = inputs["""image"""].resize((5_04, 5_04) )
__lowerCamelCase = """timbrooks/instruct-pix2pix"""
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = pipe(**UpperCamelCase_ )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
__lowerCamelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 703 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = BartphoTokenizer
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = True
def lowerCAmelCase__ ( self: Tuple ):
super().setUp()
__lowerCamelCase = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__lowerCamelCase = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowerCamelCase = {"""unk_token""": """<unk>"""}
__lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F'{token} {vocab_tokens[token]}\n' )
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: List[str] , **UpperCamelCase_: List[str] ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = BartphoTokenizer(UpperCamelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
__lowerCamelCase = """This is a là test"""
__lowerCamelCase = """▁This ▁is ▁a ▁l à ▁t est""".split()
__lowerCamelCase = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
| 80 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class lowerCamelCase__:
UpperCAmelCase__ : List[str] = MBartConfig
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : int = 'gelu'
def __init__( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: str=13 , UpperCamelCase_: Union[str, Any]=7 , UpperCamelCase_: Dict=True , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: str=99 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Any=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]=37 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Tuple=20 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: Dict=0 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCamelCase = prepare_mbart_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = TFMBartModel(config=UpperCamelCase_ ).get_decoder()
__lowerCamelCase = inputs_dict["""input_ids"""]
__lowerCamelCase = input_ids[:1, :]
__lowerCamelCase = inputs_dict["""attention_mask"""][:1, :]
__lowerCamelCase = inputs_dict["""head_mask"""]
__lowerCamelCase = 1
# first forward pass
__lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase = outputs.to_tuple()
__lowerCamelCase = past_key_values[1]
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Dict , A__ : Tuple=None , A__ : List[Any]=None , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , ):
'''simple docstring'''
if attention_mask is None:
__lowerCamelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__lowerCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase__ : List[str] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Union[str, Any] = False
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: str ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = TFMBartModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Any = [
' UN Chief Says There Is No Military Solution in Syria',
]
UpperCAmelCase__ : Tuple = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
UpperCAmelCase__ : str = 'facebook/mbart-large-en-ro'
@cached_property
def lowerCAmelCase__ ( self: Tuple ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCAmelCase__ ( self: Union[str, Any] , **UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.translate_src_text(**UpperCamelCase_ )
self.assertListEqual(self.expected_text , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: List[Any] ):
__lowerCamelCase = self.tokenizer(self.src_text , **UpperCamelCase_ , return_tensors="""tf""" )
__lowerCamelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__lowerCamelCase = self.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
return generated_words
@slow
def lowerCAmelCase__ ( self: List[Any] ):
self._assert_generated_batch_equal_expected()
| 704 |
def lowerCamelCase__ ( A__ : dict ):
'''simple docstring'''
__lowerCamelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__lowerCamelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def lowerCamelCase__ ( A__ : dict , A__ : int , A__ : set , A__ : set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
    # The node needs to be removed from the recursion stack before the function ends
rec_stk.remove(A__ )
return False
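# Added usage sketch (hedged: the outer function's name was mangled in this
# row; call it check_cycle purely for illustration). A back edge to a vertex
# still on the recursion stack signals a directed cycle:
#     check_cycle({0: [1], 1: [2], 2: [0]})   # True  (0 -> 1 -> 2 -> 0)
#     check_cycle({0: [1], 1: [2], 2: []})    # False (the graph is a DAG)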
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 | 0 |