code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCAmelCase : Dict =logging.get_logger(__name__)
enable_full_determinism()
class _A ( snake_case_ , snake_case_ , unittest.TestCase ):
snake_case__ : Tuple = UNetaDModel
snake_case__ : Dict = 'sample'
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 4
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def A__ ( self ):
"""simple docstring"""
lowercase = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()

        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
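
# The classes above follow the standard unittest layout, so they can be run with
# unittest or pytest (illustrative invocation; the module path is assumed):
#   python -m pytest test_models_unet_2d.py -k "LDM"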
| 197 |

def is_contains_unique_chars(input_str: str) -> bool:
    """
    Check whether every character in ``input_str`` occurs at most once, using a
    big-integer bitmap with one presence bit per Unicode code point.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
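
# Illustrative usage (added for clarity; not in the original file):
def _unique_chars_demo() -> None:
    assert is_contains_unique_chars("abcdef") is True
    assert is_contains_unique_chars("banana") is False  # 'a' and 'n' repeat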
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | 0 |
from copy import deepcopy


class FenwickTree:
    """Fenwick tree (Binary Indexed Tree) supporting prefix sums, range sums,
    point updates and rank queries."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
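
# Illustrative usage (added for clarity; not in the original file):
def _fenwick_demo() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3    # prefix sum over arr[0:3]
    assert tree.query(1, 4) == 2 + 3 + 4  # range sum over arr[1:4]
    tree.add(2, 10)                       # point update: arr[2] += 10
    assert tree.get(2) == 13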
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function for verifying the batch sizes coming from a prepared dataloader in each process.
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
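
# This script asserts that exactly two processes are available, so it is meant
# to be launched on two devices, e.g. (illustrative file name; the flags are
# the standard Accelerate CLI options):
#   accelerate launch --num_processes 2 test_even_batches.py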
| 297 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """You can also import this e.g. from .test_modeling_funnel import FunnelModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 44 |

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
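
# With the lazy import structure above, the public names resolve on first
# access, e.g. (illustrative): `from transformers import ConvNextConfig, ConvNextModel`.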
| 343 | 0 |
from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True if at least one value is given and all values are positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# Note: the parameter names and the molar-mass ordering below are reconstructed
# from Graham's law (rate_1 / rate_2 = sqrt(M_2 / M_1)); the obfuscated source
# had collapsed both parameters into one name.
def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must greater than 0.")
    )
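
# Illustrative check (added for clarity; argument order follows the
# reconstruction above): hydrogen effuses four times faster than oxygen.
def _grahams_law_demo() -> None:
    # rate_H2 / rate_O2 = sqrt(M_O2 / M_H2) = sqrt(32 / 2) = 4.0
    assert effusion_ratio(2.0, 32.0) == 4.0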
| 367 |

import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer (backed by the BERT tokenizer).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer (backed by the BERT tokenizer).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPRReader tokenizer (backed by the BERT tokenizer).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
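
# Sketch of intended use (illustrative; the reader model itself is defined
# elsewhere, and the example strings are placeholders):
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = reader_model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)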
| 119 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
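
# Illustrative usage (not part of the original file): the defaults above
# reproduce the architecture of the `google/canine-s` checkpoint.
#   from transformers import CanineConfig, CanineModel
#   configuration = CanineConfig()
#   model = CanineModel(configuration)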
| 104 |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """:return: Number of forward references."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """:return: A random level from the [1, self.max_level] interval."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        # list of nodes forming the given node's update vector
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
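
# Illustrative usage (added for clarity; not in the original file): expected
# O(log n) search/insert/delete on average, O(n) in the worst case.
def _skip_list_demo() -> None:
    sl: SkipList[str, int] = SkipList()
    sl.insert("Key1", 3)
    sl.insert("Key2", 12)
    assert sl.find("Key1") == 3
    sl.delete("Key1")
    assert sl.find("Key1") is None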
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__lowercase = skip_list.head
__lowercase = {}
while node.level != 0:
__lowercase = node.forward[0]
__lowercase = node.value
assert len(A__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__lowercase = skip_list.head
__lowercase = {}
while node.level != 0:
__lowercase = node.forward[0]
__lowercase = node.value
if len(A__ ) != 4:
print()
assert len(A__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
assert skip_list.find('''Some key''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _A ( ):
"""simple docstring"""
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
__lowercase = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def _A ( ):
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=0.2 , UpperCAmelCase_ : Optional[int]=0.2):
"""simple docstring"""
a : int = bp_numa
a : int = bp_numa
a : List[Any] = bp_numa
a : int = conva_get[:2]
a : List[Any] = conva_get[2]
a : List[Any] = size_pa
a : Optional[Any] = rate_w
a : Tuple = rate_t
a : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : Dict = -2 * np.random.rand(self.conva[1]) + 1
a : List[str] = -2 * np.random.rand(self.num_bpa) + 1
a : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Tuple = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(UpperCAmelCase_ , 'wb') as f:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
print(f"""Model saved: {save_path}""")
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
with open(UpperCAmelCase_ , 'rb') as f:
a : str = pickle.load(UpperCAmelCase_) # noqa: S301
a : Any = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
a : Dict = model_dic.get('size_pooling1')
a : str = model_dic.get('num_bp1')
a : Union[str, Any] = model_dic.get('num_bp2')
a : str = model_dic.get('num_bp3')
a : List[str] = model_dic.get('rate_weight')
a : str = model_dic.get('rate_thre')
# create model instance
a : Optional[Any] = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# modify model parameter
a : Union[str, Any] = model_dic.get('w_conv1')
a : int = model_dic.get('wkj')
a : List[Any] = model_dic.get('vji')
a : List[Any] = model_dic.get('thre_conv1')
a : Optional[Any] = model_dic.get('thre_bp2')
a : int = model_dic.get('thre_bp3')
return conv_ins
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return round(UpperCAmelCase_ , 3)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = convs[0]
a : Tuple = convs[1]
a : Any = np.shape(UpperCAmelCase_)[0]
# get the data slice of original image data, data_focus
a : str = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
a : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_)
        # calculate the feature map of every single kernel, and save it as a list of matrices
a : Union[str, Any] = []
a : Optional[Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCAmelCase_):
a : Union[str, Any] = []
for i_focus in range(len(UpperCAmelCase_)):
a : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_))
a : str = np.asmatrix(UpperCAmelCase_).reshape(
UpperCAmelCase_ , UpperCAmelCase_)
data_featuremap.append(UpperCAmelCase_)
        # expanding the data slice to one dimension
a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_))
a : Union[str, Any] = np.asarray(UpperCAmelCase_)
        return focusa_list, data_featuremap
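    # A minimal sketch of the sliding-window core of convolute above for a
    # single kernel: slide a size_conv window with stride conv_step, take the
    # kernel response minus its threshold and squash it with the sigmoid.
    # This helper is hypothetical, added purely for illustration.
    @staticmethod
    def _convolute_one_kernel_sketch(data, kernel, threshold, conv_step):
        size_data, size_conv = data.shape[0], kernel.shape[0]
        size_map = (size_data - size_conv) // conv_step + 1
        feature_map = np.zeros((size_map, size_map))
        for i in range(size_map):
            for j in range(size_map):
                window = data[
                    i * conv_step : i * conv_step + size_conv,
                    j * conv_step : j * conv_step + size_conv,
                ]
                response = np.sum(np.multiply(window, kernel)) - threshold
                feature_map[i, j] = 1 / (1 + np.exp(-response))
        return feature_map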
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple="average_pool"):
"""simple docstring"""
a : Dict = len(featuremaps[0])
a : Union[str, Any] = int(size_map / size_pooling)
a : Tuple = []
for i_map in range(len(UpperCAmelCase_)):
a : Dict = featuremaps[i_map]
a : str = []
for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_))
a : Any = np.asmatrix(UpperCAmelCase_).reshape(UpperCAmelCase_ , UpperCAmelCase_)
featuremap_pooled.append(UpperCAmelCase_)
return featuremap_pooled
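    # A minimal sketch of the same average pooling done with one reshape
    # instead of the explicit window loops above; it assumes size_map is an
    # exact multiple of size_pooling. Hypothetical helper, illustration only.
    @staticmethod
    def _average_pool_sketch(feature_map, size_pooling):
        out = feature_map.shape[0] // size_pooling
        blocks = np.asarray(feature_map).reshape(out, size_pooling, out, size_pooling)
        return blocks.mean(axis=(1, 3))  # collapse each window to its mean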
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = []
for i in range(len(UpperCAmelCase_)):
a : int = np.shape(data[i])
a : int = data[i].reshape(1 , shapes[0] * shapes[1])
a : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_)
a : Optional[int] = np.asarray(UpperCAmelCase_)
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Any = np.asarray(UpperCAmelCase_)
a : List[Any] = np.shape(UpperCAmelCase_)
a : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Any = []
a : Optional[int] = 0
for i_map in range(UpperCAmelCase_):
a : Any = np.ones((size_map, size_map))
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = pd_pool[
i_pool
]
a : List[str] = i_pool + 1
a : Dict = np.multiply(
UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCAmelCase_)
return pd_all
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=bool):
"""simple docstring"""
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(UpperCAmelCase_)))
print((' - - Shape: Teach_Data ', np.shape(UpperCAmelCase_)))
a : Optional[Any] = 0
a : Dict = []
a : str = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
a : str = 0
print(f"""-------------Learning Time {rp}--------------""")
for p in range(len(UpperCAmelCase_)):
# print('------------Learning Image: %d--------------'%p)
a : str = np.asmatrix(datas_train[p])
a : int = np.asarray(datas_teach[p])
a : Dict = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Any = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : Dict = np.shape(UpperCAmelCase_)
a : List[Any] = self._expand(UpperCAmelCase_)
a : Union[str, Any] = data_bp_input
a : List[Any] = np.dot(UpperCAmelCase_ , self.vji.T) - self.thre_bpa
a : List[Any] = self.sig(UpperCAmelCase_)
a : str = np.dot(UpperCAmelCase_ , self.wkj.T) - self.thre_bpa
a : List[str] = self.sig(UpperCAmelCase_)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a : int = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : Dict = np.multiply(
np.dot(UpperCAmelCase_ , self.wkj) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : List[str] = np.dot(UpperCAmelCase_ , self.vji)
a : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
a : Any = pd_conva_pooled.T.getA().tolist()
a : Dict = self._calculate_gradient_from_pool(
UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
a : Tuple = self._expand_mat(pd_conva_all[k_conv])
a : Any = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_)
a : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
a : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
a : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a : str = self.thre_bpa - pd_k_all * self.rate_thre
a : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a : Union[str, Any] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a : Union[str, Any] = rp + 1
a : Tuple = error_count / patterns
all_mse.append(UpperCAmelCase_)
def draw_error():
a : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCAmelCase_ , '+-')
plt.plot(UpperCAmelCase_ , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(UpperCAmelCase_ , alpha=0.5)
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(UpperCAmelCase_)))
for p in range(len(UpperCAmelCase_)):
a : int = np.asmatrix(datas_test[p])
a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Tuple = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : str = self._expand(UpperCAmelCase_)
a : str = data_bp_input
a : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
a : Union[str, Any] = self.sig(UpperCAmelCase_)
a : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
a : Tuple = self.sig(UpperCAmelCase_)
produce_out.extend(bp_outa.getA().tolist())
a : Optional[int] = [list(map(self.do_round , UpperCAmelCase_)) for each in produce_out]
return np.asarray(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = np.asmatrix(UpperCAmelCase_)
a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Union[str, Any] = self.pooling(UpperCAmelCase_ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 356 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
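# A minimal toy sketch of how the ranked merges written above drive BPE:
# start from characters (the last one carrying "</w>") and repeatedly apply
# the best-ranked adjacent pair, one occurrence at a time. This illustrates
# the idea only; it is not the exact CTRLTokenizer implementation.
def bpe_sketch(word, merges):
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float('inf')))
        if best not in ranks:
            break
        i = pairs.index(best)
        symbols[i : i + 2] = [''.join(best)]
    return symbols

assert bpe_sketch('adapt', ['a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>']) == ['adapt</w>']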
| 345 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = BioGptTokenizer
lowercase = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : str = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowercase_ : Optional[int] = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) )
lowercase_ : Optional[Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(__UpperCamelCase ) )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Union[str, Any] = 'lower newer'
lowercase_ : Any = 'lower newer'
return input_text, output_text
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : int = BioGptTokenizer(self.vocab_file ,self.merges_file )
lowercase_ : Union[str, Any] = 'lower'
lowercase_ : Tuple = ['low', 'er</w>']
lowercase_ : Tuple = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : str = tokens + ['<unk>']
lowercase_ : Any = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase )
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
lowercase_ : int = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCamelCase )
lowercase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
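# A minimal sketch of what the two assertions above verify: BioGPT builds
# model inputs by prepending the </s> token (id 2) to every segment, so a
# pair becomes [2] + ids_a + [2] + ids_b. The helper name is illustrative.
def build_inputs_sketch(ids_a, ids_b=None, sep_id=2):
    if ids_b is None:
        return [sep_id] + ids_a
    return [sep_id] + ids_a + [sep_id] + ids_b

assert build_inputs_sketch([14, 15]) == [2, 14, 15]
assert build_inputs_sketch([14, 15], [20]) == [2, 14, 15, 2, 20]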
| 213 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'generated'
def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Tuple:
'''simple docstring'''
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[Any] = {}
if truncation is not None:
lowercase_ : int = truncation
lowercase_ : Dict = generate_kwargs
lowercase_ : List[Any] = {}
if return_tensors is not None and return_type is None:
lowercase_ : Union[str, Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ : str = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ : Union[str, Any] = self.tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowercase_ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,__UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
lowercase_ : str = ([prefix + arg for arg in args[0]],)
lowercase_ : Union[str, Any] = True
elif isinstance(args[0] ,__UpperCamelCase ):
lowercase_ : Union[str, Any] = (prefix + args[0],)
lowercase_ : Union[str, Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowercase_ : List[Any] = self.tokenizer(*__UpperCamelCase ,padding=__UpperCamelCase ,truncation=__UpperCamelCase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
if (
isinstance(args[0] ,__UpperCamelCase )
and all(isinstance(__UpperCamelCase ,__UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,**__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Any = self._parse_and_tokenize(__UpperCamelCase ,truncation=__UpperCamelCase ,**__UpperCamelCase )
return inputs
def _UpperCAmelCase ( self ,__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.framework == "pt":
lowercase_ , lowercase_ : Optional[int] = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowercase_ , lowercase_ : Union[str, Any] = tf.shape(model_inputs['input_ids'] ).numpy()
lowercase_ : str = generate_kwargs.get('min_length' ,self.model.config.min_length )
lowercase_ : List[Any] = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(__UpperCamelCase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
lowercase_ : Tuple = self.model.generate(**__UpperCamelCase ,**__UpperCamelCase )
lowercase_ : str = output_ids.shape[0]
if self.framework == "pt":
lowercase_ : List[Any] = output_ids.reshape(__UpperCamelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
lowercase_ : List[Any] = tf.reshape(__UpperCamelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=ReturnType.TEXT ,__UpperCamelCase=False ) -> Dict:
'''simple docstring'''
lowercase_ : Dict = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ : List[Any] = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ : str = {
f'''{self.return_name}_text''': self.tokenizer.decode(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,)
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'summary'
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(lowercase_ )
class UpperCamelCase ( lowercase_ ):
lowercase = 'translation'
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def _UpperCAmelCase ( self ,*__UpperCamelCase ,__UpperCamelCase=TruncationStrategy.DO_NOT_TRUNCATE ,__UpperCamelCase=None ,__UpperCamelCase=None ) -> int:
'''simple docstring'''
if getattr(self.tokenizer ,'_build_translation_inputs' ,__UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase ,return_tensors=self.framework ,truncation=__UpperCamelCase ,src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase ,truncation=__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,**__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ : int = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
lowercase_ : str = src_lang
if tgt_lang is not None:
lowercase_ : Optional[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ : Tuple = kwargs.get('task' ,self.task )
lowercase_ : List[str] = task.split('_' )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
lowercase_ : Union[str, Any] = items[1]
lowercase_ : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Dict:
'''simple docstring'''
return super().__call__(*__UpperCamelCase ,**__UpperCamelCase )
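# A minimal sketch of the backward-compatibility branch in _sanitize_parameters
# above: a task name "translation_XX_to_YY" splits on "_" into four items, and
# items 1 and 3 become the source and target languages. Helper name illustrative.
def parse_translation_task(task):
    items = task.split("_")
    if len(items) == 4 and items[0] == "translation" and items[2] == "to":
        return items[1], items[3]
    return None, None

assert parse_translation_task("translation_en_to_fr") == ("en", "fr")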
| 213 | 1 |
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_lowercase : str = datasets.load_iris()
_lowercase : Optional[Any] = np.array(data['data'])
_lowercase : Optional[Any] = np.array(data['target'])
_lowercase : List[Any] = data['target_names']
_lowercase ,_lowercase ,_lowercase ,_lowercase : Union[str, Any] = train_test_split(X, y)
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
return np.linalg.norm(np.array(lowerCAmelCase_ ) - np.array(lowerCAmelCase_ ) )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :Optional[int]=5 ):
__UpperCAmelCase = zip(lowerCAmelCase_ , lowerCAmelCase_ )
# List of distances of all points from the point to be classified
__UpperCAmelCase = []
for data_point in data:
__UpperCAmelCase = euclidean_distance(data_point[0] , lowerCAmelCase_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase = [i[1] for i in sorted(lowerCAmelCase_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase = Counter(lowerCAmelCase_ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
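# A vectorised sketch of the same k-nearest-neighbour vote: compute all
# distances at once and use argpartition to avoid a full sort. The helper
# name is illustrative; up to distance ties it matches the classifier above.
import numpy as np
from collections import Counter

def knn_sketch(train_x, train_y, query, k=5):
    dists = np.linalg.norm(np.asarray(train_x) - np.asarray(query), axis=1)
    nearest = np.argpartition(dists, k)[:k]  # indices of the k smallest distances
    return Counter(np.asarray(train_y)[nearest]).most_common(1)[0][0]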
| 368 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : List[Any] = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
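# A minimal sketch of the idea behind _LazyModule above: PEP 562 lets a
# package's __init__ resolve attributes on first access, so heavy submodules
# are only imported when actually used. The dict mirrors the structure above;
# the module-level __getattr__ shown here is a simplified stand-in.
import importlib

_lazy_symbols = {"tokenization_mvp": ["MvpTokenizer"]}

def __getattr__(name):
    for module_name, symbols in _lazy_symbols.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")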
| 86 | 0 |
"""simple docstring"""
lowerCAmelCase_ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : str = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowercase__ : Dict = Stack()
lowercase__ : Tuple = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_SCREAMING_SNAKE_CASE ) )
elif i in operators:
# RULE 2
operator_stack.push(_SCREAMING_SNAKE_CASE )
elif i == ")":
# RULE 4
lowercase__ : str = operator_stack.peek()
operator_stack.pop()
lowercase__ : Any = operand_stack.peek()
operand_stack.pop()
lowercase__ : str = operand_stack.peek()
operand_stack.pop()
lowercase__ : List[str] = operators[opr](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
operand_stack.push(_SCREAMING_SNAKE_CASE )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase_ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
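# A worked trace of the evaluation above on "(5 + ((4 * 2) * (2 + 3)))",
# assuming the fully parenthesised single-digit input the algorithm expects:
#   digits push operands 5, 4, 2; "+" and "*" go onto the operator stack
#   first ")"  -> pops 4 * 2 = 8            operands: 5, 8
#   digits 2,3 -> operands: 5, 8, 2, 3      operators: +, *, +
#   next ")"   -> pops 2 + 3 = 5            operands: 5, 8, 5
#   next ")"   -> pops 8 * 5 = 40           operands: 5, 40
#   last ")"   -> pops 5 + 40 = 45, the value returned by the final peek.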
| 16 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 347 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
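# A minimal sketch of the guard pattern repeated above: probe an optional
# backend and register its symbols only when it is importable. find_spec is
# a simplified stand-in for is_torch_available(); the registered symbol is
# taken from the list above.
import importlib.util

def backend_available(package_name):
    return importlib.util.find_spec(package_name) is not None

_structure_sketch = {}
if backend_available("torch"):
    _structure_sketch["modeling_blenderbot"] = ["BlenderbotModel"]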
| 119 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 1_6 ):
lowerCAmelCase__ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase__ = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ = 8
else:
lowerCAmelCase__ = None
return tokenizer.pad(
lowerCAmelCase__ , padding='longest' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowerCAmelCase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCAmelCase__ ) == "1":
lowerCAmelCase__ = 2
# Initialize accelerator
lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config['lr']
lowerCAmelCase__ = int(config['num_epochs'] )
lowerCAmelCase__ = int(config['seed'] )
lowerCAmelCase__ = int(config['batch_size'] )
lowerCAmelCase__ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
lowerCAmelCase__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ = model(**lowerCAmelCase__ )
lowerCAmelCase__ = outputs.loss
lowerCAmelCase__ = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowerCAmelCase__ = 0
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ = model(**lowerCAmelCase__ )
lowerCAmelCase__ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowerCAmelCase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
lowerCAmelCase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowerCAmelCase__ )
def __lowerCamelCase ( ):
lowerCAmelCase__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 119 | 1 |
import math
def A_ ( _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = f"Input value of [number={number}] must be an integer"
raise TypeError(_UpperCAmelCase )
if number < 1:
SCREAMING_SNAKE_CASE_: Tuple = f"Input value of [number={number}] must be > 0"
raise ValueError(_UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
SCREAMING_SNAKE_CASE_: Dict = int(math.log(number // 3 , 2 ) ) + 2
SCREAMING_SNAKE_CASE_: Tuple = [3, 5]
SCREAMING_SNAKE_CASE_: int = 2
SCREAMING_SNAKE_CASE_: int = 3
for block in range(1 , _UpperCAmelCase ):
for _ in range(_UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
lowerCAmelCase : List[str] = 0
try:
lowerCAmelCase : List[str] = proth(number)
except ValueError:
print(f'''ValueError: there is no {number}th Proth number''')
continue
print(f'''The {number}th Proth number: {value}''')
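# A minimal sketch of the property the generator above enumerates: a Proth
# number has the form k * 2**n + 1 with k odd and 2**n > k. The helper name
# is illustrative; the assert matches the sequence proth() produces.
def is_proth(candidate: int) -> bool:
    if candidate < 3:
        return False
    k, n = candidate - 1, 0
    while k % 2 == 0:  # strip the powers of two to recover k and n
        k //= 2
        n += 1
    return n > 0 and 2**n > k

assert [m for m in range(3, 30) if is_proth(m)] == [3, 5, 9, 13, 17, 25]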
| 13 |
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = [0] * len(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = []
SCREAMING_SNAKE_CASE_: str = []
SCREAMING_SNAKE_CASE_: List[str] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCAmelCase )
while queue:
SCREAMING_SNAKE_CASE_: Optional[int] = queue.pop(0 )
cnt += 1
topo.append(_UpperCAmelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_UpperCAmelCase )
if cnt != len(_UpperCAmelCase ):
print("Cycle exists" )
else:
print(_UpperCAmelCase )
# Adjacency List of Graph
lowerCAmelCase : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
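# A minimal alternative sketch: the same ordering can be produced depth-first
# by emitting each vertex after all of its descendants and reversing; unlike
# Kahn's algorithm above, this version assumes the graph is acyclic.
def topological_sort_dfs(adjacency):
    visited, order = set(), []

    def visit(vertex):
        if vertex not in visited:
            visited.add(vertex)
            for neighbour in adjacency[vertex]:
                visit(neighbour)
            order.append(vertex)

    for vertex in adjacency:
        visit(vertex)
    return order[::-1]

print(topological_sort_dfs({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# -> [0, 2, 1, 3, 5, 4], one valid topological order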
| 13 | 1 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ :
def __init__( self : Dict , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any]=100 , snake_case_ : Any=13 , snake_case_ : Optional[int]=30 , snake_case_ : str=2 , snake_case_ : int=3 , snake_case_ : Optional[int]=True , snake_case_ : int=True , snake_case_ : Optional[Any]=32 , snake_case_ : str=4 , snake_case_ : List[Any]=4 , snake_case_ : Any=37 , snake_case_ : List[str]="gelu" , snake_case_ : Optional[int]=0.1 , snake_case_ : int=0.1 , snake_case_ : Tuple=10 , snake_case_ : Any=0.02 , snake_case_ : Any=3 , snake_case_ : Tuple=None , snake_case_ : str=[0, 1, 2, 3] , ) -> int:
'''simple docstring'''
A__ = parent
A__ = 100
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = out_indices
A__ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 1
def __magic_name__ ( self : Dict ) -> int:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : int ) -> str:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __magic_name__ ( self : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int ) -> List[str]:
'''simple docstring'''
A__ = BeitModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict ) -> Any:
'''simple docstring'''
A__ = BeitForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __magic_name__ ( self : Dict , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : List[Any] ) -> List[Any]:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = BeitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : str , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = self.num_labels
A__ = BeitForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A__ = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
A__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase ):
lowercase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
A__ = BeitModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def __magic_name__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase_ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def __magic_name__ ( self : Dict ) -> int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def __magic_name__ ( self : str ) -> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling]:
continue
A__ = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
A__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
A__ = model(**UpperCamelCase_ ).loss
loss.backward()
def __magic_name__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
A__ = False
A__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
A__ = model_class(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase_ )
model.train()
A__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
A__ = model(**UpperCamelCase_ ).loss
loss.backward()
def __magic_name__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
A__ = model_class(config=UpperCamelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __magic_name__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = BeitModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
A__ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCamelCase_ )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).pixel_values.to(UpperCamelCase_ )
# prepare bool_masked_pos
A__ = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
A__ = model(pixel_values=UpperCamelCase_ , bool_masked_pos=UpperCamelCase_ )
A__ = outputs.logits
# verify the logits
A__ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , UpperCamelCase_ )
A__ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase_ , atol=1e-2 ) )
@slow
def __magic_name__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
A__ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCamelCase_ )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits
# verify the logits
A__ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , UpperCamelCase_ )
A__ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
A__ = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ )
@slow
def __magic_name__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
UpperCamelCase_ )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits
# verify the logits
A__ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , UpperCamelCase_ )
A__ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
A__ = 2_396
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ )
@slow
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
A__ = model.to(UpperCamelCase_ )
A__ = BeitImageProcessor(do_resize=UpperCamelCase_ , size=640 , do_center_crop=UpperCamelCase_ )
A__ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
A__ = Image.open(ds[0]["file"] )
A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits
# verify the logits
A__ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , UpperCamelCase_ )
A__ = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
A__ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=UpperCamelCase_ , )
else:
A__ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
A__ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
A__ = model.to(UpperCamelCase_ )
A__ = BeitImageProcessor(do_resize=UpperCamelCase_ , size=640 , do_center_crop=UpperCamelCase_ )
A__ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
A__ = Image.open(ds[0]["file"] )
A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase_ )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(500, 300)] )
A__ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ )
A__ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
| 355 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1_024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
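# Usage sketch (not part of the original file; assumes the restored LiltConfig
# above): defaults mirror the SCUT-DLVCLab/lilt-roberta-en-base checkpoint.
if __name__ == "__main__":
    config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1_024)
    print(config.model_type)  # 'lilt'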
| 230 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
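# Usage sketch (not part of the original file; assumes the restored
# ConvNextV2Config above): out_features picks which stages a backbone consumer
# receives, validated against the stage_names computed in __init__.
if __name__ == "__main__":
    config = ConvNextV2Config(depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']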
| 52 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """simple docstring"""

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}')
        self.torchscript = kwargs.pop('torchscript', self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics', self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop('fp16_opt_level', self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fp16_opt_level: str = field(
        default='O1',
        metadata={
            'help': (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        },
    )
@cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self, ['torch'])
        logger.info('PyTorch: setting up devices')
        if not self.cuda:
            device = torch.device('cpu')
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self) -> bool:
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        '''simple docstring'''
        requires_backends(self, ['torch'])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        '''simple docstring'''
        requires_backends(self, ['torch'])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        '''simple docstring'''
        requires_backends(self, ['torch'])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
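# Illustration (not part of the original file) of the deprecated-flag flip done
# in __init__ above: a legacy "no_*" kwarg is popped and stored, negated, under
# its positive name.
if __name__ == "__main__":
    legacy_kwargs = {"no_cuda": True}
    deprecated_arg = "no_cuda"
    positive_arg = deprecated_arg[3:]  # 'cuda'
    positive_value = not legacy_kwargs.pop(deprecated_arg)  # False
    print(positive_arg, positive_value)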
| 99 | 0 |
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    """Return True if number uses each of the digits 1-9 exactly once."""
    base_str = str(number)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    """Find the largest 1 to 9 pandigital concatenated product."""
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
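# Worked check (not part of the original file): 100_002 * 9_327 concatenates
# 9327 with 2 * 9327 = 18654, giving 932718654, which uses each digit 1-9 once;
# this is the value the first loop in solution() is expected to return.
if __name__ == "__main__":
    assert is_9_pandigital(100_002 * 9_327)  # 932718654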
| 278 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
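# Sketch (not part of the original file): with sys.modules[__name__] replaced by
# _LazyModule above, "from transformers.models.falcon import FalconConfig" stays
# cheap (config only), while first touching FalconModel is what triggers the
# torch-backed modeling_falcon import. The module path and timing here are
# assumptions based on the usual transformers layout.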
| 278 | 1 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
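# Worked recurrence step (not part of the original file):
# C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0 = 5 + 2 + 2 + 5 = 14,
# so catalan_numbers(4) == [1, 1, 2, 5, 14].
if __name__ == "__main__":
    assert catalan_numbers(4) == [1, 1, 2, 5, 14]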
| 302 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Dict =['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Optional[Dict[str, int]] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 255 , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Dict , ):
'''simple docstring'''
super().__init__(**__lowercase )
__a = size if size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__lowercase , default_to_square=__lowercase , param_name="""crop_size""" )
__a = do_resize
__a = do_rescale
__a = do_normalize
__a = do_center_crop
__a = crop_size
__a = size
__a = resample
__a = rescale_factor
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "shortest_edge" in size:
__a = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
__a = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : str , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Any , __lowercase : np.ndarray , __lowercase : float , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str ):
'''simple docstring'''
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ):
'''simple docstring'''
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCamelCase_ ( self : Tuple , __lowercase : ImageInput , __lowercase : Optional[bool] = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : int = None , __lowercase : Optional[bool] = None , __lowercase : Optional[float] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__lowercase : List[Any] , ):
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__lowercase , param_name="""crop_size""" , default_to_square=__lowercase )
__a = resample if resample is not None else self.resample
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(__lowercase )
if not is_batched(__lowercase ):
__a = [images]
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__a = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__a = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__a = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__a = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
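# Standalone sketch (not part of the original file) of the rescale -> normalize
# ordering used in preprocess() above. The mean/std constants below are the
# timm/ImageNet "default" values that IMAGENET_DEFAULT_MEAN/STD are assumed to
# hold.
if __name__ == "__main__":
    pixel = np.array([128.0, 64.0, 32.0])  # one RGB pixel in the 0-255 range
    rescaled = pixel * (1 / 255)  # what self.rescale applies
    normalized = (rescaled - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
    print(normalized.round(3))  # what self.normalize produces for this pixel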
| 302 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")] )
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 355 |
import os
def solution() -> int:
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Accumulate, row by row, the best path sum reaching each cell.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
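# Worked example (not part of the original file): the same row-by-row DP on the
# small four-row triangle from the Project Euler 18 statement; the best path
# 3 -> 7 -> 4 -> 9 sums to 23.
if __name__ == "__main__":
    a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    assert max(a[-1]) == 23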
| 140 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"can't find {path}")
    return results
def is_cuda_and_apex_available() -> bool:
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class snake_case ( __snake_case ):
@classmethod
def lowercase_ ( cls : Optional[int])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = tempfile.mkdtemp()
__lowerCAmelCase: Any = os.path.join(cls.tmpdir , "default_config.yml")
write_basic_config(save_location=cls.configPath)
__lowerCAmelCase: Any = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def lowercase_ ( cls : Tuple)-> List[str]:
'''simple docstring'''
shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Union[str, Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
__lowerCAmelCase: Any = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_accuracy"] , 0.75)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : Tuple)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Optional[int] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
__lowerCAmelCase: str = get_results(UpperCamelCase__)
self.assertLess(result["perplexity"] , 1_0_0)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: str = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: Dict = get_results(UpperCamelCase__)
self.assertLess(result["perplexity"] , 4_2)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = 7 if get_gpu_count() > 1 else 2
__lowerCAmelCase: int = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Union[str, Any] = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: int = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_accuracy"] , 0.75)
self.assertLess(result["train_loss"] , 0.5)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr")
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Optional[Any] = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: List[Any] = get_results(UpperCamelCase__)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8)
self.assertGreaterEqual(result["eval_exact"] , 2_8)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Dict = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: Any = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_accuracy"] , 0.8)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: Any = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_rouge1"] , 1_0)
self.assertGreaterEqual(result["eval_rouge2"] , 2)
self.assertGreaterEqual(result["eval_rougeL"] , 7)
self.assertGreaterEqual(result["eval_rougeLsum"] , 7)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : str)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Union[str, Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: int = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_bleu"] , 3_0)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "translation_no_trainer")))
@slow
def lowercase_ ( self : Tuple)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: str = logging.StreamHandler(sys.stdout)
logger.addHandler(UpperCamelCase__)
__lowerCAmelCase: Any = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Dict = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs)
__lowerCAmelCase: Optional[Any] = get_results(UpperCamelCase__)
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"})
def lowercase_ ( self : List[Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.get_auto_remove_tmp_dir()
__lowerCAmelCase: Union[str, Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
__lowerCAmelCase: Optional[int] = get_results(UpperCamelCase__)
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6)
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "step_1")))
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "image_classification_no_trainer")))
| 217 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class snake_case ( __snake_case ):
# to overwrite at feature extractactor specific tests
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
@property
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "feature_size"))
self.assertTrue(hasattr(UpperCamelCase__ , "sampling_rate"))
self.assertTrue(hasattr(UpperCamelCase__ , "padding_value"))
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: int = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(UpperCamelCase__) == len(UpperCamelCase__) for x, y in zip(UpperCamelCase__ , processed_features[input_name])))
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="np")
__lowerCAmelCase: Optional[int] = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs} , tensor_type="pt")
__lowerCAmelCase: str = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf")
__lowerCAmelCase: int = processed_features[input_name]
if len(batch_features_input.shape) < 3:
__lowerCAmelCase: Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str]=False)-> Optional[int]:
'''simple docstring'''
        def _inputs_have_equal_length(input_list):
            length = len(input_list[0])
            for input_slice in input_list[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_list_1, input_list_2):
            if len(input_list_1) != len(input_list_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_list_1, input_list_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
__lowerCAmelCase: List[str] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Tuple = feat_extract.model_input_names[0]
__lowerCAmelCase: List[Any] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[int] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase: str = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase: Tuple = self.feat_extract_tester.min_seq_length
__lowerCAmelCase: Union[str, Any] = self.feat_extract_tester.batch_size
__lowerCAmelCase: Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase: Optional[int] = feat_extract.pad(UpperCamelCase__ , padding=UpperCamelCase__)
__lowerCAmelCase: Optional[int] = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest")
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[-1]))
__lowerCAmelCase: Optional[Any] = input_a[input_name]
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
__lowerCAmelCase: Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="np")
__lowerCAmelCase: Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , pad_to_multiple_of=1_0)
__lowerCAmelCase: Tuple = input_a[input_name]
__lowerCAmelCase: Tuple = feat_extract.pad(UpperCamelCase__ , padding="longest" , pad_to_multiple_of=1_0)
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__)
__lowerCAmelCase: str = input_a[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , pad_to_multiple_of=1_0 , max_length=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: List[str] = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase__) % 1_0 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
__lowerCAmelCase: Optional[Any] = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(UpperCamelCase__) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
__lowerCAmelCase: Any = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int]=False)-> str:
'''simple docstring'''
        def _inputs_have_equal_length(input_list):
            length = len(input_list[0])
            for input_slice in input_list[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_list_1, input_list_2):
            if len(input_list_1) != len(input_list_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_list_1, input_list_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase__)
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
# truncate to smallest
__lowerCAmelCase: int = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Any = input_a[input_name]
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]))
__lowerCAmelCase: Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to smallest with np
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np" , truncation=UpperCamelCase__ , )
__lowerCAmelCase: List[str] = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , return_tensors="np")
__lowerCAmelCase: Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
# truncate to middle
__lowerCAmelCase: Union[str, Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__ , return_tensors="np" , )
__lowerCAmelCase: int = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , truncation=UpperCamelCase__)
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[Any] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[1]) , return_tensors="np")
__lowerCAmelCase: str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(_inputs_are_equal(UpperCamelCase__ , UpperCamelCase__))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="longest" , truncation=UpperCamelCase__)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase__):
feat_extract.pad(UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase: Tuple = 1_2
__lowerCAmelCase: Optional[int] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , truncation=UpperCamelCase__ , )
__lowerCAmelCase: Dict = input_a[input_name]
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=len(speech_inputs[0]) , pad_to_multiple_of=UpperCamelCase__ , )
__lowerCAmelCase: str = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase: Optional[Any] = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase: Optional[Any] = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(UpperCamelCase__))
self.assertFalse(_inputs_have_equal_length(UpperCamelCase__))
def lowercase_ ( self : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
self._check_padding(numpify=UpperCamelCase__)
def lowercase_ ( self : List[Any])-> Tuple:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
self._check_truncation(numpify=UpperCamelCase__)
@require_torch
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: str = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: str = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="pt")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1e-2)
@require_tf
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.feature_extraction_class(**self.feat_extract_dict)
__lowerCAmelCase: Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: List[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Any = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Dict = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")[input_name]
__lowerCAmelCase: Any = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="tf")[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1e-2)
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: Any = True
__lowerCAmelCase: str = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: Tuple = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Optional[Any] = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: Optional[Any] = feat_extract.pad(UpperCamelCase__ , padding="longest" , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , UpperCamelCase__)
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = self.feat_extract_dict
__lowerCAmelCase: str = True
__lowerCAmelCase: int = self.feature_extraction_class(**UpperCamelCase__)
__lowerCAmelCase: int = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase: str = [len(UpperCamelCase__) for x in speech_inputs]
__lowerCAmelCase: Any = feat_extract.model_input_names[0]
__lowerCAmelCase: Optional[int] = BatchFeature({input_name: speech_inputs})
__lowerCAmelCase: List[str] = min(UpperCamelCase__)
__lowerCAmelCase: List[str] = feat_extract.pad(
UpperCamelCase__ , padding="max_length" , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors="np")
self.assertIn("attention_mask" , UpperCamelCase__)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 217 | 1 |
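The row above exercises the shared `pad`/truncation API of sequence feature extractors. A minimal usage sketch, assuming `Wav2Vec2FeatureExtractor` as one concrete implementation of that interface (the sampling rate and input lengths are made up for illustration):

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
from transformers.feature_extraction_utils import BatchFeature

feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
speech_inputs = [np.random.rand(n).astype(np.float32) for n in (800, 1_000, 1_200)]
batch = BatchFeature({feat_extract.model_input_names[0]: speech_inputs})

# truncate every sequence to the shortest length so a dense array can be returned
padded = feat_extract.pad(batch, padding="max_length", max_length=800, truncation=True, return_tensors="np")
print(padded[feat_extract.model_input_names[0]].shape)  # (3, 800)
```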
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( lowercase_ = 10_00 ) -> int:
A__ = 3
A__ = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f'{solution() = }')
| 230 |
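For reference, the sum computed by the row above also has a closed form via inclusion–exclusion; a sketch (the helper name is mine):

```python
def solution_closed_form(n: int = 1_000) -> int:
    """Sum of multiples of 3 or 5 below n in O(1) via inclusion-exclusion."""
    def sum_of_multiples(k: int) -> int:
        m = (n - 1) // k  # how many multiples of k lie below n
        return k * m * (m + 1) // 2

    return sum_of_multiples(3) + sum_of_multiples(5) - sum_of_multiples(15)

assert solution_closed_form() == 233_168  # the well-known answer for n = 1000
```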
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
A__ = model.config
A__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
A__ = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[3] )
A__ = int(key_split[5] )
A__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A__ = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=False ) -> Dict:
# load original model
A__ = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
A__, A__ = get_configs(lowercase_ )
A__ = DonutSwinModel(lowercase_ )
A__ = MBartForCausalLM(lowercase_ )
A__ = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
A__ = original_model.state_dict()
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
A__ = load_dataset("hf-internal-testing/example-documents" )
A__ = dataset["test"][0]["image"].convert("RGB" )
A__ = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
A__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A__ = DonutProcessor(lowercase_ , lowercase_ )
A__ = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = "When is the coffee break?"
A__ = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ = "hello world"
else:
raise ValueError("Model name not supported" )
A__ = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
A__ = original_model.encoder.model.patch_embed(lowercase_ )
A__, A__ = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
A__ = original_model.encoder(lowercase_ )
A__ = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
A__ = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
A__ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230 | 1 |
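The conversion script above follows the standard checkpoint-porting recipe: iterate over the original state dict, rewrite each key with string substitutions, and split fused qkv tensors into separate q/k/v entries. A stripped-down sketch of the renaming step (toy keys, not the full Donut mapping):

```python
import torch

def rename_key(name: str) -> str:
    # two illustrative substitutions; the real script handles many more patterns
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    return name

old_state = {"patch_embed.proj.weight": torch.zeros(4, 4), "mlp.fc1.bias": torch.zeros(4)}
new_state = {rename_key(k): v for k, v in old_state.items()}
print(sorted(new_state))
```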
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 |
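`_LazyModule` defers the heavy submodule imports above until an attribute is first accessed. The same effect is available in plain Python through a module-level `__getattr__` (PEP 562); a minimal sketch independent of the transformers helper (package and module names are hypothetical):

```python
# mypkg/__init__.py
import importlib

_import_structure = {"tokenization_mvp": ["MvpTokenizer"]}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```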
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (a and b are 1-indexed):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list: O(mn) for m edges and n vertices."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap: O(m log n) for m edges and n vertices."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Doctest hook kept from the original module; exercised by testmod below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | 0 |
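A quick usage sketch for the Prim module above (the graph topology is invented for illustration):

```python
g = [Vertex(i) for i in range(4)]
connect(g, 1, 2, 1)   # vertices are 1-indexed in connect()
connect(g, 2, 3, 2)
connect(g, 3, 4, 1)
connect(g, 1, 4, 5)
print(prim(g[:], g[0]))             # MST edges as (vertex, parent) pairs
print(list(prim_heap(g[:], g[0])))  # same tree via the heap variant
```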
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class snake_case_ (__UpperCamelCase ):
UpperCAmelCase__ : torch.FloatTensor
UpperCAmelCase__ : Optional[torch.FloatTensor] = None
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int=0.999 , __lowerCAmelCase : List[str]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCAmelCase : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCAmelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
a__ = []
for i in range(__SCREAMING_SNAKE_CASE ):
a__ = i / num_diffusion_timesteps
a__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__SCREAMING_SNAKE_CASE ) / alpha_bar_fn(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) )
return torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class snake_case_ (__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Any = 1
@register_to_config
def __init__( self :Optional[Any] ,__snake_case :int = 10_00 ,__snake_case :float = 0.00_01 ,__snake_case :float = 0.02 ,__snake_case :str = "linear" ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :int = 0 ,__snake_case :str = "epsilon" ,__snake_case :float = 1.0 ,**__snake_case :List[Any] ,) -> Union[str, Any]:
if kwargs.get('set_alpha_to_one' ,_lowerCAmelCase ) is not None:
a__ = (
"""The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
)
deprecate('set_alpha_to_one' ,'1.0.0' ,_lowerCAmelCase ,standard_warn=_lowerCAmelCase )
a__ = kwargs["""set_alpha_to_one"""]
if trained_betas is not None:
a__ = torch.tensor(_lowerCAmelCase ,dtype=torch.floataa )
elif beta_schedule == "linear":
a__ = torch.linspace(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a__ = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_lowerCAmelCase ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a__ = betas_for_alpha_bar(_lowerCAmelCase )
else:
raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' )
a__ = 1.0 - self.betas
a__ = torch.cumprod(self.alphas ,dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
a__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
a__ = 1.0
# setable values
a__ = None
a__ = torch.from_numpy(np.arange(0 ,_lowerCAmelCase ).copy().astype(np.intaa ) )
def lowerCamelCase__( self :List[Any] ,__snake_case :torch.FloatTensor ,__snake_case :Optional[int] = None ) -> List[Any]:
return sample
def lowerCamelCase__( self :Tuple ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'
F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'
F' maximal {self.config.num_train_timesteps} timesteps.' )
a__ = num_inference_steps
a__ = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a__ = (np.arange(0 ,_lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
a__ = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def lowerCamelCase__( self :Optional[int] ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :float = 0.0 ,__snake_case :bool = False ,__snake_case :Optional[torch.FloatTensor] = None ,__snake_case :bool = True ,) -> List[str]:
# 1. get previous step value (=t+1)
a__ = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
a__ = self.alphas_cumprod[timestep]
a__ = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
a__ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
a__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
a__ = model_output
elif self.config.prediction_type == "sample":
a__ = model_output
a__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
a__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
a__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
a__ = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase ,pred_original_sample=_lowerCAmelCase )
def __len__( self :int ) -> Tuple:
return self.config.num_train_timesteps
| 370 |
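A hedged sketch of driving an inverse-DDIM scheduler like the one above: invert a clean latent toward noise by stepping through the ascending timesteps. The tiny randomly initialized UNet config is an assumption for illustration only:

```python
import torch
from diffusers import DDIMInverseScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "UpBlock2D"),
)
scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 32, 32)  # stand-in for a clean latent
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = unet(sample, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample
```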
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109 | 0 |
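A quick check of the bridge finder against the first demo graph (vertices 0-1-2 form a cycle, 5-6-7-8 form a cycle, and the connecting edges are bridges):

```python
print(compute_bridges(get_demo_graph(0)))  # the bridge set is {(2, 3), (3, 4), (2, 5)}
```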
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) , )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 44 |
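The script launched by the helper above must expose an `_mp_fn(index)` entry point for `xmp.spawn` to call in each process. A minimal compatible training script (the file name is hypothetical):

```python
# train.py
import sys

def _mp_fn(index):
    # `index` is the process ordinal assigned by xmp.spawn
    print(f"process {index} started with argv: {sys.argv}")

if __name__ == "__main__":
    _mp_fn(0)
```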
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase_ = """Usage of script: script_name <size_of_canvas:int>"""
lowercase_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowerCamelCase ( __lowerCamelCase : int ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = [[False for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )]
return canvas
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->None:
for i, row in enumerate(__lowerCamelCase ):
for j, _ in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = bool(random.getrandbits(1 ) )
def lowerCamelCase ( __lowerCamelCase : list[list[bool]] ) ->list[list[bool]]:
_SCREAMING_SNAKE_CASE = np.array(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCamelCase ):
for c, pt in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = __judge_point(
__lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_SCREAMING_SNAKE_CASE = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_SCREAMING_SNAKE_CASE = current_canvas.tolist()
return return_canvas
def lowerCamelCase ( __lowerCamelCase : bool , __lowerCamelCase : list[list[bool]] ) ->bool:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_SCREAMING_SNAKE_CASE = pt
if pt:
if alive < 2:
_SCREAMING_SNAKE_CASE = False
elif alive == 2 or alive == 3:
_SCREAMING_SNAKE_CASE = True
elif alive > 3:
_SCREAMING_SNAKE_CASE = False
else:
if alive == 3:
_SCREAMING_SNAKE_CASE = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowercase_ = int(sys.argv[1])
# main working structure of this module.
lowercase_ = create_canvas(canvas_size)
seed(c)
lowercase_ , lowercase_ = plt.subplots()
fig.show()
lowercase_ = ListedColormap(["""w""", """k"""])
try:
while True:
lowercase_ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 58 | 0 |
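The per-cell Python loops above cost O(n²) interpreter overhead per generation; the update can be vectorized with a 2D convolution. A sketch using scipy (an extra dependency the module above does not use; it treats out-of-bounds neighbours as dead, which differs slightly from the slicing code above):

```python
import numpy as np
from scipy.signal import convolve2d

def run_vectorized(canvas: np.ndarray) -> np.ndarray:
    """One Game of Life step on a boolean array."""
    kernel = np.ones((3, 3), dtype=int)
    kernel[1, 1] = 0  # do not count the cell itself
    neighbours = convolve2d(canvas.astype(int), kernel, mode="same", boundary="fill")
    # a cell is alive next step with exactly 3 neighbours, or 2 if already alive
    return (neighbours == 3) | (canvas & (neighbours == 2))
```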
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Dict = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "big_bird"
def __init__( self : Dict , lowercase : Optional[Any]=50_358 , lowercase : int=768 , lowercase : Tuple=12 , lowercase : Union[str, Any]=12 , lowercase : Tuple=3_072 , lowercase : Any="gelu_new" , lowercase : int=0.1 , lowercase : str=0.1 , lowercase : Dict=4_096 , lowercase : Optional[Any]=2 , lowercase : Optional[int]=0.02 , lowercase : List[str]=1E-12 , lowercase : List[str]=True , lowercase : Dict=0 , lowercase : Any=1 , lowercase : Optional[int]=2 , lowercase : Optional[Any]=66 , lowercase : Tuple="block_sparse" , lowercase : Dict=True , lowercase : Tuple=False , lowercase : str=64 , lowercase : Tuple=3 , lowercase : Tuple=None , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , sep_token_id=lowercase , **lowercase , )
_snake_case = vocab_size
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = type_vocab_size
_snake_case = layer_norm_eps
_snake_case = use_cache
_snake_case = rescale_embeddings
_snake_case = attention_type
_snake_case = use_bias
_snake_case = block_size
_snake_case = num_random_blocks
_snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
@property
def A ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 356 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 130 | 0 |
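A bare `@lru_cache` (supported since Python 3.8) defaults to `maxsize=128`, so repeated calls stay O(1) while the cache stays bounded; `@lru_cache(maxsize=None)` would memoize without limit. A small sanity check against the standard library:

```python
from math import factorial as math_factorial

for n in (0, 1, 5, 10):
    assert factorial(n) == math_factorial(n)
print(factorial.cache_info())  # hits/misses accumulated by the memoized calls
```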
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 270 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 1 |
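A quick check against the sample graphs above (E→G→F costs 2 + 1 = 3, shorter than E→B→C→D→F at 4):

```python
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3
```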
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_A : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_A : str = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_A : List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCamelCase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def __lowerCamelCase ( self : Optional[int] , A : Dict , A : List[Any] , A : Tuple=None , A : Any=True , A : List[Any]=False ):
if rouge_types is None:
lowerCamelCase__ : Dict = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowerCamelCase__ : Dict = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase_ , use_stemmer=UpperCamelCase_ )
if use_aggregator:
lowerCamelCase__ : str = scoring.BootstrapAggregator()
else:
lowerCamelCase__ : Optional[Any] = []
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase__ : Optional[Any] = scorer.score(UpperCamelCase_ , UpperCamelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCamelCase_ )
else:
scores.append(UpperCamelCase_ )
if use_aggregator:
lowerCamelCase__ : str = aggregator.aggregate()
else:
lowerCamelCase__ : List[str] = {}
for key in scores[0]:
lowerCamelCase__ : int = [score[key] for score in scores]
return result
| 355 |
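Under the datasets wrapper above, scoring is a direct call into Google's `rouge_score` package; a minimal sketch of using that library without the metric class:

```python
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
scores = scorer.score("hello there", "hello there general kenobi")  # (target, prediction)
print(scores["rouge1"].precision, scores["rouge1"].recall, scores["rouge1"].fmeasure)
```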
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_A : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_UpperCAmelCase : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_UpperCAmelCase : str = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_UpperCAmelCase : Tuple = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCamelCase ( self : str , A : List[str] , A : Tuple , A : str ) ->List[str]:
lowerCamelCase__ : List[Any] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __lowerCamelCase ( self : List[str] , A : Union[str, Any] , A : Optional[Any] ) ->Union[str, Any]:
lowerCamelCase__ : Any = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
# No kwarg
lowerCamelCase__ : str = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
lowerCamelCase__ : Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
lowerCamelCase__ : Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
A , {'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase__ : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
A , {'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase__ : str = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase__ : Optional[int] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
A , [
{'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
lowerCamelCase__ : Any = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
A , [
{'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(A ):
classifier(A , candidate_labels='''politics''' )
with self.assertRaises(A ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(A ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=A )
with self.assertRaises(A ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(A ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=A , )
self.run_entailment_id(A )
def __lowerCamelCase ( self : Optional[int] , A : Pipeline ) ->Dict:
lowerCamelCase__ : str = zero_shot_classifier.model.config
lowerCamelCase__ : Optional[int] = config.labelaid
lowerCamelCase__ : Tuple = zero_shot_classifier.entailment_id
lowerCamelCase__ : Optional[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
lowerCamelCase__ : Optional[int] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase__ : List[str] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase__ : Optional[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
lowerCamelCase__ : Tuple = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
@require_torch
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : Optional[int] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __lowerCamelCase ( self : List[Any] ) ->Optional[Any]:
lowerCamelCase__ : List[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
lowerCamelCase__ : Dict = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def __lowerCamelCase ( self : str ) ->List[str]:
lowerCamelCase__ : Any = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
lowerCamelCase__ : Optional[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def __lowerCamelCase ( self : List[Any] ) ->Optional[Any]:
lowerCamelCase__ : int = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
lowerCamelCase__ : int = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
lowerCamelCase__ : Any = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase__ : List[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
lowerCamelCase__ : Dict = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
lowerCamelCase__ : Union[str, Any] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 265 | 0 |
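The pipeline exercised above casts classification as NLI: each candidate label is slotted into a hypothesis template and scored for entailment against the input, which is why the config needs an `entailment` label id. A sketch using the same tiny test checkpoint as the tests above (its scores are meaningless; this only demonstrates the call):

```python
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health"],
    hypothesis_template="This example is about {}.",
)
print(result["labels"], result["scores"])
```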
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # n is even here, so integer division is exact
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 40 |
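The recursion above can be flattened into an iterative square-and-multiply loop, and the built-in three-argument `pow` computes the same thing; a sketch:

```python
def binary_exponentiation_iterative(a: int, n: int, mod: int) -> int:
    """Iterative square-and-multiply, equivalent to the recursive version above."""
    result = 1
    a %= mod
    while n > 0:
        if n & 1:                       # odd: fold one factor of a into the result
            result = (result * a) % mod
        a = (a * a) % mod               # square the base
        n >>= 1
    return result

assert binary_exponentiation_iterative(10, 699, 701) == pow(10, 699, 701)
```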
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {"""vocab_file""": """spiece.model"""}
__UpperCamelCase : Any = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Tuple = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
__UpperCamelCase : Optional[Any] = """▁"""
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
snake_case__ = legacy
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , )
snake_case__ = vocab_file
snake_case__ = extra_ids
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , )
return max_model_length
@property
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase )) + [1]
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
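
# Illustrative usage sketch (not part of the class above): exercises the eos and
# sentinel helpers, assuming the standard pretrained "t5-small" checkpoint is available.
if __name__ == "__main__":
    from transformers import T5Tokenizer

    tok = T5Tokenizer.from_pretrained("t5-small")
    ids = tok.build_inputs_with_special_tokens(tok.encode("hello", add_special_tokens=False))
    assert ids[-1] == tok.eos_token_id  # exactly one </s> is appended
    print(tok.get_sentinel_tokens()[:3], tok.get_sentinel_token_ids()[:3])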
| 307 | 0 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[float], capacity: float
) -> tuple[float, list[float]]:
    """Greedy solution: take items in decreasing order of value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
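

# Illustrative check (the item values and weights are made up): with capacity 1
# the densest items are taken whole and the break item only fractionally.
def _demo_fractional_knapsack() -> None:
    value = [1, 3, 5, 7, 9]
    weight = [0.9, 0.7, 0.5, 0.3, 0.1]
    max_value, fractions = fractional_knapsack(value, weight, capacity=1)
    print(max_value, fractions)  # ~21.43 and fractions [0, ~0.14, 1, 1, 1]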
if __name__ == "__main__":
import doctest
doctest.testmod() | 364 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create our conditions
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 239 | 0 |
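
# Hedged usage sketch for the pipeline above, mirroring the diffusers RL example.
# The gym/d4rl environment name and the checkpoint id are assumptions here.
if __name__ == "__main__":
    import gym

    env = gym.make("hopper-medium-v2")
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32", env=env
    )
    obs = env.reset()
    action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)
    obs, reward, done, info = env.step(action)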
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
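
# Minimal standalone sketch (demo code, not transformers internals) of the
# deferred-import trick used above: attribute access triggers the real
# submodule import, so importing the package itself stays cheap.
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)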
| 141 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
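
# Hedged follow-up sketch: a directory written by the converter above should be
# reloadable as a pipeline (the path is a placeholder).
def _demo_reload_converted(output_path: str = "./ldm-converted"):
    from diffusers import LDMPipeline

    pipe = LDMPipeline.from_pretrained(output_path)
    return pipe(num_inference_steps=50).images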
| 141 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            # fused q/k/v projections are split into separate q, k and v tensors below
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowercase : List[str] = val[
:dim, :
]
lowercase : List[str] = val[
dim : dim * 2, :
]
lowercase : Any = val[
-dim:, :
]
else:
lowercase : Optional[int] = val[
:dim
]
lowercase : List[Any] = val[
dim : dim * 2
]
lowercase : Optional[int] = val[
-dim:
]
else:
if "weight" in key:
lowercase : int = val[
:dim, :
]
lowercase : str = val[
dim : dim * 2, :
]
lowercase : Optional[int] = val[
-dim:, :
]
else:
lowercase : Any = val[:dim]
lowercase : List[Any] = val[
dim : dim * 2
]
lowercase : Any = val[-dim:]
elif key.startswith("""mit""" ):
lowercase : Optional[Any] = key_split[2]
lowercase : List[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
lowercase : str = val[:dim, :]
lowercase : Optional[int] = val[dim : dim * 2, :]
lowercase : int = val[-dim:, :]
else:
lowercase : List[Any] = val[:dim]
lowercase : str = val[dim : dim * 2]
lowercase : Dict = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
lowercase : Any = val[:dim, :]
lowercase : Optional[int] = val[
dim : dim * 2, :
]
lowercase : Tuple = val[-dim:, :]
else:
lowercase : Optional[Any] = val[:dim]
lowercase : Optional[Any] = val[
dim : dim * 2
]
lowercase : str = val[-dim:]
else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
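    # Hedged CLI example (the output path is a placeholder):
    #   python convert_xclip_checkpoint.py --model_name xclip-base-patch32 \
    #       --pytorch_dump_folder_path ./xclip-base-patch32
    # The dump can then be reloaded with XCLIPModel.from_pretrained("./xclip-base-patch32").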
| 285 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
lowercase : List[str] = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
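    # Quick illustrative check: Gauss's computus yields 2024-03-31, which matches
    # the published Western Easter date for that year.
    assert gauss_easter(2024) == datetime(2024, 3, 31)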
| 285 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
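
# Hedged usage sketch (the image path is a placeholder; the CLIP base checkpoint
# is the usual default model for this task):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(
        "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
    )
    print(classifier("cat.png", candidate_labels=["a cat", "a dog", "a car"]))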
| 49 |
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
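    # Illustrative numbers: $1000 at 0.5% per day for 10 days earns $50 of
    # simple interest.
    print(simple_interest(1000, 0.005, 10))  # 50.0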
| 280 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 266 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # note: the original check was `if "fc2" and ...`, which is always truthy
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_lowercase : Tuple =parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
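    # Hedged CLI example (paths are placeholders):
    #   python convert_nllb_moe_checkpoint.py \
    #       --nllb_moe_checkpoint_path /weights/model_moe_54b/checkpoint_2_300000 \
    #       --pytorch_dump_folder_path ./nllb-moe-converted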
| 266 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key below is kept for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 89 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Any:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
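
# Test modules like this are normally driven by pytest, but a plain unittest
# entry point also works for ad-hoc runs (slow tests additionally need RUN_SLOW=1).
if __name__ == "__main__":
    unittest.main()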
| 235 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
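
# Illustrative only (the class above is deprecated in favor of TrainingArguments):
# instantiating it resolves the device once through the cached property.
if __name__ == "__main__":
    args = SageMakerTrainingArguments(output_dir="./out")
    print(args.device, args.n_gpu)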
| 75 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
A_ = 42
A_ = None
class SCREAMING_SNAKE_CASE ( snake_case , snake_case ):
"""simple docstring"""
A_ = 2
@register_to_config
def __init__( self: List[str] , __A: float = 0.02 , __A: float = 1_00 , __A: float = 1.007 , __A: float = 80 , __A: float = 0.05 , __A: float = 50 , ) -> Optional[int]:
# standard deviation of the initial noise distribution
_A = sigma_max
# setable values
_A = None
_A = None
_A = None # sigma(t_i)
def __A ( self: Any , __A: torch.FloatTensor , __A: Optional[int] = None ) -> torch.FloatTensor:
return sample
def __A ( self: Union[str, Any] , __A: int , __A: Union[str, torch.device] = None ) -> List[Any]:
_A = num_inference_steps
_A = np.arange(0 , self.num_inference_steps )[::-1].copy()
_A = torch.from_numpy(__A ).to(__A )
_A = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_A = torch.tensor(__A , dtype=torch.floataa , device=__A )
def __A ( self: List[Any] , __A: torch.FloatTensor , __A: float , __A: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_A = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_A = 0
# sample eps ~ N(0, S_noise^2 * I)
_A = self.config.s_noise * randn_tensor(sample.shape , generator=__A ).to(sample.device )
_A = sigma + gamma * sigma
_A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __A ( self: Optional[Any] , __A: torch.FloatTensor , __A: float , __A: float , __A: torch.FloatTensor , __A: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
_A = sample_hat + sigma_hat * model_output
_A = (sample_hat - pred_original_sample) / sigma_hat
_A = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__A , derivative=__A , pred_original_sample=__A )
def __A ( self: Dict , __A: torch.FloatTensor , __A: float , __A: float , __A: torch.FloatTensor , __A: torch.FloatTensor , __A: torch.FloatTensor , __A: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
_A = sample_prev + sigma_prev * model_output
_A = (sample_prev - pred_original_sample) / sigma_prev
_A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__A , derivative=__A , pred_original_sample=__A )
def __A ( self: Any , __A: List[Any] , __A: int , __A: int ) -> int:
raise NotImplementedError()
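A minimal NumPy sketch of the add_noise_to_input step above, using the defaults registered in __init__ (s_churn=80, s_min=0.05, s_max=50, s_noise=1.007): sigma is lifted to sigma_hat and matching Gaussian noise is added so the sample stays consistent with the higher noise level.

import numpy as np

def add_noise_to_input(sample, sigma, num_inference_steps=100,
                       s_churn=80.0, s_min=0.05, s_max=50.0, s_noise=1.007,
                       rng=np.random.default_rng(0)):
    # Temporarily raise the noise level only while sigma lies in [s_min, s_max];
    # gamma is capped at sqrt(2) - 1 as in Karras et al. (2022), Algorithm 2.
    gamma = min(s_churn / num_inference_steps, 2**0.5 - 1) if s_min <= sigma <= s_max else 0.0
    sigma_hat = sigma + gamma * sigma
    eps = s_noise * rng.standard_normal(sample.shape)  # eps ~ N(0, s_noise^2 I)
    sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
    return sample_hat, sigma_hat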
| 75 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase( UpperCamelCase_ , UpperCamelCase_=10 ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = []
for _ in range(UpperCamelCase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowercase( UpperCamelCase_ , UpperCamelCase_=10 ) -> int:
'''simple docstring'''
UpperCamelCase = []
for step in range(UpperCamelCase_ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(UpperCamelCase_ , """schedule.bin""" )
torch.save(scheduler.state_dict() , UpperCamelCase_ )
UpperCamelCase = torch.load(UpperCamelCase_ )
scheduler.load_state_dict(UpperCamelCase_ )
return lrs
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
UpperCamelCase = torch.tensor([0.4, 0.2, -0.5] )
UpperCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCamelCase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
UpperCamelCase = criterion(lowerCamelCase_ , lowerCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
UpperCamelCase = torch.tensor([0.4, 0.2, -0.5] )
UpperCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
UpperCamelCase = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCamelCase_ , weight_decay=0.0 , relative_step=lowerCamelCase_ , scale_parameter=lowerCamelCase_ , warmup_init=lowerCamelCase_ , )
for _ in range(1000 ):
UpperCamelCase = criterion(lowerCamelCase_ , lowerCamelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
__lowerCAmelCase = nn.Linear(50 , 50 ) if is_torch_available() else None
__lowerCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
__lowerCAmelCase = 10
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Any=None ):
"""simple docstring"""
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ , msg=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
UpperCamelCase = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
UpperCamelCase , UpperCamelCase = data
UpperCamelCase = scheduler_func(self.optimizer , **lowerCamelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
UpperCamelCase = unwrap_schedule(lowerCamelCase_ , self.num_steps )
self.assertListAlmostEqual(
lowerCamelCase_ , lowerCamelCase_ , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
UpperCamelCase = scheduler_func(self.optimizer , **lowerCamelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase_ ) # wrap to test picklability of the schedule
UpperCamelCase = unwrap_and_save_reload_schedule(lowerCamelCase_ , self.num_steps )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ , msg=f"""failed for {scheduler_func} in save and reload""" )
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = fn
def __call__( self : int , *lowerCamelCase_ : int , **lowerCamelCase_ : str ):
"""simple docstring"""
return self.fn(*lowerCamelCase_ , **lowerCamelCase_ )
@classmethod
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = list(map(self , scheduler.lr_lambdas ) )
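The save/reload helper above reduces to this checkpoint-and-restore pattern, sketched with a plain PyTorch LambdaLR (the specific optimizers and schedules under test are incidental; LambdaLR.state_dict() skips plain-function lambdas, which is why the test wraps them in a picklable class):

import os
import tempfile
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 4)
optimizer = SGD(model.parameters(), lr=10.0)
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: 1.0 / (step + 1))

lrs = []
for step in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    optimizer.step()
    scheduler.step()
    if step == 4:  # checkpoint halfway through, then restore into the same scheduler
        with tempfile.TemporaryDirectory() as tmpdirname:
            path = os.path.join(tmpdirname, "schedule.bin")
            torch.save(scheduler.state_dict(), path)
            scheduler.load_state_dict(torch.load(path))
print(lrs)  # the learning-rate trajectory is unaffected by the round-trip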
| 343 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_SCREAMING_SNAKE_CASE = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
_SCREAMING_SNAKE_CASE = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
_SCREAMING_SNAKE_CASE = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def lowercase( ) -> Dict:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) )
UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowercase( UpperCamelCase_ = 100 ) -> List[Any]:
'''simple docstring'''
return (generate_random_hand() for _ in range(UpperCamelCase_ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = PokerHand(UpperCamelCase_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
'''simple docstring'''
assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected
def lowercase( ) -> Dict:
'''simple docstring'''
UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(UpperCamelCase_ )
UpperCamelCase = chain(sorted(UpperCamelCase_ ) )
for index, hand in enumerate(UpperCamelCase_ ):
assert hand == poker_hands[index]
def lowercase( ) -> Union[str, Any]:
'''simple docstring'''
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=UpperCamelCase_ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowercase( ) -> str:
'''simple docstring'''
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowercase( ) -> int:
'''simple docstring'''
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" )
with open(UpperCamelCase_ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ )
UpperCamelCase = player.compare_with(UpperCamelCase_ )
if output == "Win":
answer += 1
assert answer == 376
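PokerHand itself lives in the sibling sola module and is not shown here; a hypothetical sketch of the two predicates these tests exercise, assuming the same rank-then-suit card encoding used by the fixtures:

def is_flush(hand: str) -> bool:
    # All five cards share one suit, e.g. "8C 9C 5C 3C TC".
    return len({card[1] for card in hand.split()}) == 1

def is_straight(hand: str) -> bool:
    order = "23456789TJQKA"
    values = sorted(order.index(card[0]) for card in hand.split())
    if values == [0, 1, 2, 3, 12]:  # low-ace straight A-2-3-4-5
        return True
    return values == list(range(values[0], values[0] + 5))

assert is_flush("8C 9C 5C 3C TC")
assert is_straight("2H 4D 3C AS 5S")  # the low-ace case from the straight fixtures above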
| 343 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def lowerCAmelCase__ ( a__: Callable[[int | float], int | float] , a__: int | float , a__: int | float , a__: int = 1_0_0 , ) -> float:
'''simple docstring'''
_UpperCAmelCase = x_start
_UpperCAmelCase = fnc(a__ )
_UpperCAmelCase = 0.0
for _ in range(a__ ):
# Approximates curve as a sequence of linear lines and sums their length
_UpperCAmelCase = (x_end - x_start) / steps + xa
_UpperCAmelCase = fnc(a__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
_UpperCAmelCase = xa
_UpperCAmelCase = fxa
return length
if __name__ == "__main__":
def lowerCAmelCase__ ( a__: List[Any] ) -> Optional[int]:
'''simple docstring'''
return math.sin(1_0 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowerCAmelCase__ :str = 1_0
while i <= 1_0_0_0_0_0:
print(f'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''')
i *= 1_0
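The placeholder assignments above collapse x1/x2 and f(x1)/f(x2) into a single name, so the snippet is not runnable as printed; a working version of the arc-length approximation it encodes:

import math
from collections.abc import Callable

def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    # Approximate the curve as `steps` straight segments and sum their lengths.
    x_prev, f_prev, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        x_next = x_prev + (x_end - x_start) / steps
        f_next = fnc(x_next)
        length += math.hypot(x_next - x_prev, f_next - f_prev)
        x_prev, f_prev = x_next, f_next
    return length

print(line_length(lambda x: math.sin(10 * x), -10, 10, 10_000))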
| 185 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __a ( UpperCAmelCase ):
_a : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 185 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 50 , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> Union[Tuple, ImagePipelineOutput]:
lowerCamelCase : List[Any] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCamelCase__ , )
lowerCamelCase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase : Any = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(UpperCamelCase__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
lowerCamelCase : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase : Union[str, Any] = {}
if accepts_eta:
lowerCamelCase : Tuple = eta
for t in self.progress_bar(self.scheduler.timesteps ):
lowerCamelCase : int = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
lowerCamelCase : int = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Tuple = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# decode the image latents with the VAE
lowerCamelCase : List[str] = self.vqvae.decode(UpperCamelCase__ ).sample
lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase : str = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
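Stripped of the module registration and image post-processing, the sampling loop above is just scale, predict, step; a runnable sketch where a zero-returning stand-in replaces the UNet call:

import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

def predict_noise(latents, t):  # stand-in for self.unet(latents, t).sample
    return torch.zeros_like(latents)

latents = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
latents = latents * scheduler.init_noise_sigma  # scale to the scheduler's expected std
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(latents, t)
    noise_pred = predict_noise(model_input, t)
    latents = scheduler.step(noise_pred, t, latents).prev_sample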
| 48 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-1'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-2'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-3'''
lowerCamelCase_ = '''CompVis/stable-diffusion-v1-4'''
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
super()._init_()
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = StableDiffusionPipeline(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , requires_safety_checker=SCREAMING_SNAKE_CASE_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase_ (self ):
return {k: getattr(self , SCREAMING_SNAKE_CASE_ ) for k in self.config.keys() if not k.startswith("""_""" )}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
return self.pipea(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@torch.no_grad()
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(SCREAMING_SNAKE_CASE_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase__ = self.textaimg_sda_a(
prompt=SCREAMING_SNAKE_CASE_ , height=SCREAMING_SNAKE_CASE_ , width=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , output_type=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
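The four text-to-image calls above differ only in which checkpoint they route to; validate once, then fan the same arguments out to every sub-pipeline. A condensed sketch over hypothetical sub-pipeline objects:

def compare_pipelines(pipes, prompt, height=512, width=512, **kwargs):
    # Latent-space models downsample by a factor of 8, hence the divisibility guard.
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
    # Identical prompt and kwargs through every checkpoint; keep the first image of each.
    return [pipe(prompt=prompt, height=height, width=width, **kwargs).images[0] for pipe in pipes]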
| 244 | 0 |
SCREAMING_SNAKE_CASE__ : int = [0, 2, 4, 6, 8]
SCREAMING_SNAKE_CASE__ : Dict = [1, 3, 5, 7, 9]
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : int ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
__lowerCamelCase = 0
for digit in range(10 ):
__lowerCamelCase = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , __lowerCAmelCase , __lowerCAmelCase )
return result
__lowerCamelCase = 0
for digita in range(10 ):
__lowerCamelCase = digita
if (remainder + digita) % 2 == 0:
__lowerCamelCase = ODD_DIGITS
else:
__lowerCamelCase = EVEN_DIGITS
for digita in other_parity_digits:
__lowerCamelCase = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , __lowerCAmelCase , __lowerCAmelCase , )
return result
def __magic_name__ ( __lowerCAmelCase : int = 9 ) -> int:
__lowerCamelCase = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(__lowerCAmelCase , 0 , [0] * length , __lowerCAmelCase )
return result
if __name__ == "__main__":
print(F'{solution() = }')
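A brute-force cross-check helps validate the digit-by-digit recursion above; Project Euler 145 calls n reversible when n + reverse(n) consists entirely of odd digits (numbers ending in 0 are excluded, since the reversal would need a leading zero):

def is_reversible(n: int) -> bool:
    if n % 10 == 0:
        return False
    total = n + int(str(n)[::-1])
    return all(int(digit) % 2 == 1 for digit in str(total))

print(sum(is_reversible(n) for n in range(1, 1000)))  # 120, the count the problem statement gives below 10**3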
| 339 |
from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE__ : Any = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def __magic_name__ ( ) -> Any:
__lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] )
__lowerCamelCase = g.get_repo('''huggingface/transformers''' )
__lowerCamelCase = repo.get_issues(state='''open''' )
for issue in open_issues:
__lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
__lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
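The two elif conditions reduce to a pure-datetime staleness predicate; a sketch with parameter names chosen here to mirror the PyGithub timestamps used above:

from datetime import datetime

def is_stale(updated_at: datetime, created_at: datetime,
             inactive_days: int = 23, min_age_days: int = 30) -> bool:
    now = datetime.utcnow()
    return (now - updated_at).days > inactive_days and (now - created_at).days >= min_age_days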
| 339 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowercase = ["text", "image", "audio"]
def __UpperCAmelCase ( a_):
snake_case_ = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input')
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png').resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
elif isinstance(a_ , a_):
inputs.append(create_inputs(a_))
else:
raise ValueError(f'''Invalid type requested: {input_type}''')
return inputs
def __UpperCAmelCase ( a_):
snake_case_ = []
for output in outputs:
if isinstance(a_ , (str, AgentText)):
output_types.append('text')
elif isinstance(a_ , (Image.Image, AgentImage)):
output_types.append('image')
elif isinstance(a_ , (torch.Tensor, AgentAudio)):
output_types.append('audio')
else:
raise ValueError(f'''Invalid output: {output}''')
return output_types
@is_tool_test
class UpperCamelCase_ :
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[Any]:
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
snake_case_ = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case_ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ = create_inputs(self.tool.inputs )
snake_case_ = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case_ = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def _UpperCamelCase ( self ) -> str:
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = create_inputs(self.tool.inputs )
snake_case_ = self.tool(*a )
if not isinstance(a , a ):
snake_case_ = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
snake_case_ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = create_inputs(self.tool.inputs )
snake_case_ = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case_ = self.tool(*a )
if not isinstance(a , a ):
snake_case_ = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 178 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=64 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = vocab_size - 1
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> int:
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = True
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self , a , a , a ) -> Optional[int]:
snake_case_ = GPTNeoXModel(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
snake_case_ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a ) -> List[str]:
snake_case_ = True
snake_case_ = GPTNeoXModel(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> int:
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , a , a , a , a ) -> Optional[int]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self , a , a , a , a ) -> List[str]:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , a , a , a , a ) -> str:
snake_case_ = self.num_labels
snake_case_ = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
snake_case_ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , a , a , a ) -> List[Any]:
snake_case_ = True
snake_case_ = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
snake_case_ = model(a , attention_mask=a , use_cache=a )
snake_case_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ = model(a , attention_mask=a , output_hidden_states=a )
snake_case_ = output_from_no_past['hidden_states'][0]
snake_case_ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
snake_case_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def _UpperCamelCase ( self ) -> str:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCamelCase ( self ) -> str:
snake_case_ = GPTNeoXModelTester(self )
snake_case_ = ConfigTester(self , config_class=a , hidden_size=64 , num_attention_heads=8 )
def _UpperCamelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> Optional[int]:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def _UpperCamelCase ( self ) -> int:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> Dict:
# This regression test was failing with PyTorch < 1.3
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def _UpperCamelCase ( self ) -> str:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _UpperCamelCase ( self ) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def _UpperCamelCase ( self ) -> List[str]:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _UpperCamelCase ( self , a ) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ids_tensor([1, 10] , config.vocab_size )
snake_case_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = GPTNeoXModel(a )
original_model.to(a )
original_model.eval()
snake_case_ = original_model(a ).last_hidden_state
snake_case_ = original_model(a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ = {'type': scaling_type, 'factor': 10.0}
snake_case_ = GPTNeoXModel(a )
scaled_model.to(a )
scaled_model.eval()
snake_case_ = scaled_model(a ).last_hidden_state
snake_case_ = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
snake_case_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a )
snake_case_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
snake_case_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
snake_case_ = model.generate(**a , do_sample=a , max_new_tokens=20 )
snake_case_ = tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
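The decoder-with-past check above asserts that incremental decoding through past_key_values matches a from-scratch forward pass on the concatenated sequence. The same invariant, sketched against a small causal LM (the checkpoint name is illustrative; any tiny GPT-style model works):

import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("sshleifer/tiny-gpt2").eval()
prefix = torch.randint(0, model.config.vocab_size, (1, 8))
next_tokens = torch.randint(0, model.config.vocab_size, (1, 3))

with torch.no_grad():
    past = model(prefix, use_cache=True).past_key_values
    cached = model(next_tokens, past_key_values=past).logits               # incremental decode
    full = model(torch.cat([prefix, next_tokens], dim=-1)).logits[:, -3:]  # full forward
assert torch.allclose(cached, full, atol=1e-3)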
| 178 | 1 |
'''simple docstring'''
lowerCAmelCase__ = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 371 |
'''simple docstring'''
lowerCAmelCase__ = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355_818,
}
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
__lowercase = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(A__ )}"
)
raise ValueError(A__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
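Every conversion routes through the base unit (joules): multiply by the source factor to get joules, divide by the target factor to leave them. A de-obfuscated sketch with a trimmed table:

ENERGY_CONVERSION = {"joule": 1.0, "kilojoule": 1000, "kilowatthour": 3_600_000}

def convert_energy(value: float, from_type: str, to_type: str) -> float:
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]

print(convert_energy(1, "kilowatthour", "kilojoule"))  # 3600.0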
| 52 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( _lowerCAmelCase , unittest.TestCase ):
__A = CTRLTokenizer
__A = False
__A = False
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ :Optional[Any] = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowercase_ :Optional[int] = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowercase_ :Optional[Any] = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowercase_ :int = {"unk_token": "<unk>"}
lowercase_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase ) )
def lowercase__ ( self : Dict , **lowercase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def lowercase__ ( self : Tuple , lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = "adapt react readapt apt"
lowercase_ :int = "adapt react readapt apt"
return input_text, output_text
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :List[Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ :int = "adapt react readapt apt"
lowercase_ :int = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowercase_ :Union[str, Any] = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
lowercase_ :Dict = tokens + [tokenizer.unk_token]
lowercase_ :Union[str, Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
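The fixture encodes a toy BPE where "@@" marks a non-final subword, so "react" (which no merge rule covers) splits into re@@ a@@ c@@ t. The id assertion at the end is just a vocabulary lookup with an <unk> fallback:

vocab = {"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}
tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
print([vocab.get(tok, vocab["<unk>"]) for tok in tokens])  # [0, 1, 2, 4, 5, 1, 0, 3, 6]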
| 223 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class a_ ( _lowerCAmelCase ):
__A = "autoformer"
__A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Any , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : str = "student_t" , lowercase : str = "nll" , lowercase : int = 1 , lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase : bool = True , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = 0 , lowercase : int = 0 , lowercase : Optional[List[int]] = None , lowercase : Optional[List[int]] = None , lowercase : int = 64 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 2 , lowercase : int = 32 , lowercase : int = 32 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 100 , lowercase : float = 0.02 , lowercase : bool = True , lowercase : int=True , lowercase : int = 10 , lowercase : int = 25 , lowercase : int = 3 , **lowercase : Dict , ):
"""simple docstring"""
lowercase_ :str = prediction_length
lowercase_ :Dict = context_length if context_length is not None else prediction_length
lowercase_ :Any = distribution_output
lowercase_ :Tuple = loss
lowercase_ :Dict = input_size
lowercase_ :Tuple = num_time_features
lowercase_ :int = lags_sequence
lowercase_ :Tuple = scaling
lowercase_ :List[Any] = num_dynamic_real_features
lowercase_ :Union[str, Any] = num_static_real_features
lowercase_ :str = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase_ :Optional[int] = cardinality
else:
lowercase_ :Optional[int] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase_ :Tuple = embedding_dimension
else:
lowercase_ :Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase_ :Any = num_parallel_samples
# Transformer architecture configuration
lowercase_ :Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase_ :Union[str, Any] = d_model
lowercase_ :Optional[Any] = encoder_attention_heads
lowercase_ :Optional[Any] = decoder_attention_heads
lowercase_ :Optional[int] = encoder_ffn_dim
lowercase_ :int = decoder_ffn_dim
lowercase_ :Any = encoder_layers
lowercase_ :Optional[Any] = decoder_layers
lowercase_ :Dict = dropout
lowercase_ :Dict = attention_dropout
lowercase_ :str = activation_dropout
lowercase_ :int = encoder_layerdrop
lowercase_ :Dict = decoder_layerdrop
lowercase_ :List[str] = activation_function
lowercase_ :int = init_std
lowercase_ :Optional[int] = use_cache
# Autoformer
lowercase_ :List[str] = label_length
lowercase_ :List[str] = moving_average
lowercase_ :Optional[int] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def lowercase__ ( self : int ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
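The defaulted embedding sizes and the derived model input width are easy to check by hand; a worked example assuming cardinality [366, 5], input_size 1, the default seven lags, two time features, and no dynamic or static real features:

cardinality = [366, 5]
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]  # [50, 3]
number_of_features = sum(embedding_dimension) + 0 + 2 + 0 + 1 * 2       # + log1p(|loc|) and log(scale) -> 57
feature_size = 1 * 7 + number_of_features                               # input_size * len(lags) + features -> 64
print(embedding_dimension, feature_size)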
| 223 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case : Optional[int] ={
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int =['VisionTextDualEncoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] =['FlaxVisionTextDualEncoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple =['TFVisionTextDualEncoderModel']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
__snake_case : str =_LazyModule(__name__, globals()['__file__'], _import_structure)
| 94 |
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        # nearest-neighbor 2x upsample in NHWC, followed by a 3x3 conv
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and broadcast-add it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
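A short usage sketch for the resnet block above (assumes `flax` and `jax` are installed; shapes follow the NHWC layout used by `jax.image.resize`).

import jax
import jax.numpy as jnp

block = FlaxResnetBlock2D(in_channels=32, out_channels=64, dropout_prob=0.0)
hidden = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
temb = jnp.ones((1, 128))         # time embedding
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out = block.apply(params, hidden, temb, deterministic=True)
assert out.shape == (1, 8, 8, 64)  # channels changed via the 1x1 nin shortcut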
| 94 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["ChineseCLIPFeatureExtractor"]
__lowerCAmelCase : str =["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCAmelCase : Optional[int] =" def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self) -> None:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self) -> None:
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
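For reference, a minimal standalone illustration of the `black` API the helper above relies on; the input string and printed result are illustrative.

import black

mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
print(black.format_str("x = {  'a':1 }", mode=mode))  # -> x = {"a": 1}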
| 237 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build an odd-sized Gabor kernel for the given orientation `theta` (degrees)."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_05 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_05)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 267 |
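A small pure-NumPy sanity check for `gabor_filter_kernel` above, usable without OpenCV; the symmetry holds because at theta=0 the kernel value does not depend on y.

import numpy as np

kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert kernel.shape == (11, 11)              # an even ksize is bumped to odd
assert np.allclose(kernel, kernel[::-1, :])  # row symmetry at theta=0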
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args) | 267 | 1 |
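A toy walk-through of `add_sub_symbol` above; the token list is hand-made for illustration, not real tokenizer output.

tokens = ["[CLS]", "身", "高", "180", "[SEP]"]
words = {"身高"}
print(add_sub_symbol(list(tokens), words))
# -> ['[CLS]', '身', '##高', '180', '[SEP]']: '高' is marked as the tail of the
#    whole word '身高', which the whole-word-masking step later relies on.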
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
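A hedged usage sketch for the template above; it needs the `datasets` library, and task templates were removed from recent `datasets` releases, so treat the snippet as illustrative only.

from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
template = AudioClassification(audio_column="audio", label_column="labels")
aligned = template.align_with_features(features)
assert aligned.label_schema["labels"].names == ["dog", "cat"]
assert template.column_mapping == {"audio": "audio", "labels": "labels"}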
| 304 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs).text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs).text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 253 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 301 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
        sas_model.load_state_dict(save_dict['''model'''])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''')['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''', name='''LFQA_reddit''')
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='''english_wiki40b_snippets_100w''', n_results=n_results, )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device='''cuda:0''', )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
UpperCAmelCase__ : List[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
UpperCAmelCase__ : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
UpperCAmelCase__ : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
UpperCAmelCase__ : List[str] = action_list.index(action_st)
UpperCAmelCase__ : Optional[Any] = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
UpperCAmelCase__ : List[Any] = show_type == 'Show full text of passages'
else:
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Optional[Any] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
UpperCAmelCase__ : List[str] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
UpperCAmelCase__ : int = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
UpperCAmelCase__ : Tuple = 'wiki40b'
UpperCAmelCase__ : List[Any] = 'dense'
UpperCAmelCase__ : Tuple = 'beam'
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Dict = 64
UpperCAmelCase__ : Any = 256
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[int] = st.sidebar.checkbox('Generation options')
if generate_options:
UpperCAmelCase__ : Any = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
UpperCAmelCase__ : int = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : str = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Tuple = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Optional[int] = None
# start main text
UpperCAmelCase__ : Any = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
UpperCAmelCase__ : List[Any] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : Any = st.text_input('Enter your question here:', '')
else:
UpperCAmelCase__ : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ ,UpperCAmelCase__ : Tuple = make_support(question, source=wiki_source, method='dense', n_results=10)
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
UpperCAmelCase__ : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : str = support_list[:10]
UpperCAmelCase__ : str = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
UpperCAmelCase__ ,UpperCAmelCase__ : List[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ ,UpperCAmelCase__ : Optional[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
UpperCAmelCase__ : Any = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
UpperCAmelCase__ : Tuple = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : Optional[int] = '[{}]({})'.format(res[0], wiki_url)
else:
UpperCAmelCase__ : int = sec_titles.split(' & ')
UpperCAmelCase__ : Union[str, Any] = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : Union[str, Any] = find_nearest_training(question)
UpperCAmelCase__ : int = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
UpperCAmelCase__ : Tuple = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
UpperCAmelCase__ : Any = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
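The dense retriever above boils down to inner-product search over unit-normalized embeddings; a compact sketch with random vectors (requires only `faiss` and `numpy`):

import faiss
import numpy as np

dim = 128
passages = np.random.rand(1000, dim).astype("float32")
faiss.normalize_L2(passages)           # unit vectors -> IP equals cosine
index = faiss.IndexFlatIP(dim)
index.add(passages)

query = np.random.rand(1, dim).astype("float32")
faiss.normalize_L2(query)
scores, ids = index.search(query, 10)  # top-10 nearest passages
print(ids[0])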
| 301 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
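A quick check of the masking arithmetic implied by the defaults above: 224x224 images with 16x16 patches yield 196 patches, of which a `mask_ratio` of 0.75 hides 147, leaving 49 visible to the encoder.

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2
num_masked = int(mask_ratio * num_patches)
assert (num_patches, num_masked, num_patches - num_masked) == (196, 147, 49)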
| 330 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
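The reader half of REALM scores candidate answer spans of at most `max_span_width` tokens; a tiny sketch of that span enumeration (illustrative, not the model code):

def candidate_spans(seq_len: int, max_span_width: int):
    # (start, end) pairs, inclusive end, covering at most max_span_width tokens
    return [(s, e) for s in range(seq_len) for e in range(s, min(s + max_span_width, seq_len))]

spans = candidate_spans(seq_len=6, max_span_width=3)
assert (0, 2) in spans and (0, 3) not in spans  # spans cover at most 3 tokens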
| 2 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A : Optional[int] = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( SequenceFeatureExtractor ):
    model_input_names = ["input_features", "is_longer"]

    def __init__( self , feature_size=64 , sampling_rate=4_8000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min = 0 , frequency_max = 1_4000 , top_db = None , truncation = "fusion" , padding = "repeatpad" , **kwargs , )-> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale="""htk""" , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , )
    def to_dict( self )-> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features( self , waveform , mel_filters = None )-> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel="""dB""" , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames )-> np.array:
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
    def _get_input_mel( self , waveform , max_length , truncation , padding )-> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech , truncation = None , padding = None , max_length = None , sampling_rate = None , return_tensors = None , **kwargs , )-> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float32 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , List ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
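
# Minimal usage sketch (an illustrative addition; the 48 kHz rate and "fusion"
# truncation come from the defaults above, the noise waveform is made up): feed
# raw mono audio and get back a (batch, 4, frames, 64) mel "fusion" stack plus
# the is_longer flags.
if __name__ == "__main__":
    extractor = _SCREAMING_SNAKE_CASE()
    noise = np.random.randn(4_8000 * 12)  # 12 s of audio, longer than the 10 s window
    batch = extractor(noise, sampling_rate=4_8000, return_tensors="np")
    print(batch["input_features"].shape, batch["is_longer"])  # (1, 4, frames, 64), [[True]]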
| 49 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path , articles ) -> None:
    """simple docstring"""
    content = """\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class _SCREAMING_SNAKE_CASE ( TestCasePlus ):
    def run_eval_tester( self , model )-> None:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        '.split()
        with patch.object(sys , """argv""" , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval( self )-> None:
        self.run_eval_tester(T5_TINY )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , model )-> None:
        self.run_eval_tester(model )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self , model )-> None:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / """scores.json""" )
        reference_path = str(tmp_dir / """val.target""" )
        _dump_articles(input_file_name , text["""en"""] )
        _dump_articles(reference_path , text["""de"""] )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'\n            run_eval_search.py\n            {model}\n            {str(input_file_name )}\n            {str(output_file_name )}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        '.split()
        testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(sys , """argv""" , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [""" num_beams | length_penalty""", model, """Best score args"""]
            un_expected_strings = ["""Info"""]
            if "translation" in task:
                expected_strings.append("""bleu""" )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name ).exists()
            os.remove(Path(output_file_name ) )
| 49 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22PriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100

    @property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def dummy_image_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model

    @property
    def dummy_image_processor( self ):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_prior( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )

    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 302 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientnet"""] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
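
# Usage sketch (an illustrative addition): with `_import_structure` wired up above,
# importing this package stays cheap; the torch-backed classes are only materialized
# the first time one of the exported names is accessed, e.g.
#
#   from transformers.models.efficientnet import EfficientNetConfig  # triggers the lazy import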
| 302 | 1 |
from math import factorial
def solution(n = 20 ):
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 352 |
__magic_name__: List[Any] = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__magic_name__: Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__magic_name__: Optional[Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 0 |
"""simple docstring"""
import functools
from typing import Any
def __lowerCAmelCase (string , words ):
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be not empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key: str = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
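
# Example usage (an illustrative addition): classic word-break cases.
# "applepenapple" can be segmented with ["apple", "pen"], while "catsandog"
# cannot be fully covered by the second dictionary.
assert __lowerCAmelCase('applepenapple' , ['apple', 'pen'] )
assert not __lowerCAmelCase('catsandog' , ['cats', 'dog', 'sand', 'and', 'cat'] )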
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 86 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''
    def check_results_dict_not_empty( self , results ) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )

    def test_inference_no_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_no_configs_only_pretrain( self ) -> None:
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,only_pretrain_model=True ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_torchscript( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,torchscript=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def test_inference_fp16( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,fp16=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_no_model_no_architectures( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args ,configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=True ,inference=False ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
    def test_train_no_configs_fp16( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=True ,inference=False ,sequence_lengths=[8] ,batch_sizes=[1] ,fp16=True ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args ,configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_inference_encoder_decoder_with_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=False ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args ,configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def test_train_with_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=True ,inference=False ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args ,configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def test_train_encoder_decoder_with_configs( self ) -> None:
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] ,training=True ,inference=False ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=False ,)
        benchmark = PyTorchBenchmark(benchmark_args ,configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,training=True ,inference=True ,save_to_csv=True ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(tmp_dir ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(tmp_dir ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(tmp_dir ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(tmp_dir ,'train_time.csv' ) ,env_info_csv_file=os.path.join(tmp_dir ,'env.csv' ) ,multi_process=False ,)
            benchmark = PyTorchBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir ,'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir ,'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir ,'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir ,'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir ,'env.csv' ) ).exists() )

    def test_trace_memory_line_by_line( self ) -> None:
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary ,'sequential' ) )
            self.assertTrue(hasattr(summary ,'cumulative' ) )
            self.assertTrue(hasattr(summary ,'current' ) )
            self.assertTrue(hasattr(summary ,'total' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID] ,training=True ,inference=True ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(tmp_dir ,'log.txt' ) ,log_print=True ,trace_memory_line_by_line=True ,multi_process=False ,)
            benchmark = PyTorchBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir ,'log.txt' ) ).exists() )
| 74 | 0 |
'''simple docstring'''
def or_gate(input_1 , input_2 ):
    '''simple docstring'''
    return int((input_1, input_2).count(1 ) != 0 )


def test_or_gate():
    '''simple docstring'''
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 367 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__A : Any = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __snake_case ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
def __init__( self : Dict , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=True , lowerCamelCase : List[Any]="[UNK]" , lowerCamelCase : Tuple="[SEP]" , lowerCamelCase : str="[PAD]" , lowerCamelCase : Any="[CLS]" , lowerCamelCase : str="[MASK]" , lowerCamelCase : Dict=True , lowerCamelCase : Union[str, Any]=None , **lowerCamelCase : Optional[int] , ) -> Optional[Any]:
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , )
lowerCAmelCase_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Tuple = getattr(lowerCamelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : Optional[Any] = do_lower_case
lowerCAmelCase_ : Dict = strip_accents
lowerCAmelCase_ : List[Any] = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCamelCase )
lowerCAmelCase_ : Tuple = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> list:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 89 | 0 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.8660254, 0.8660254])[:1]  # placeholder removed below
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors , steps ):
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors


def iteration_step(vectors ):
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors


def rotate(vector , angle_in_degrees ):
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )


def plot(vectors ):
    axes = plt.gca()
    axes.set_aspect('equal' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
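
    # Sanity check (an illustrative addition): each step replaces every edge with
    # 4 edges, so the closed triangle of 4 points grows to 3 * 4**5 + 1 == 3073
    # points after 5 iterations.
    assert len(processed_vectors) == 3 * 4**5 + 1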
| 83 |
def __UpperCamelCase ( txt ) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 116 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly , x ):
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner(poly , x ):
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
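
    # Equivalence check (an illustrative addition): Horner's rule rewrites
    # a_0 + a_1*x + ... + a_n*x**n as (...(a_n*x + a_{n-1})*x + ...)*x + a_0,
    # so both evaluators must agree up to floating-point rounding.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6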
| 259 |
"""simple docstring"""
def bfs(graph , source , sink , parent ):
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph , source , sink ):
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
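
# Expected output (an illustrative addition): this is the classic CLRS flow
# network, whose maximum flow from vertex 0 to vertex 5 is 23.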
| 259 | 1 |
ROMAN = [
(1_0_0_0, '''M'''),
(9_0_0, '''CM'''),
(5_0_0, '''D'''),
(4_0_0, '''CD'''),
(1_0_0, '''C'''),
(9_0, '''XC'''),
(5_0, '''L'''),
(4_0, '''XL'''),
(1_0, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def roman_to_int(roman ):
    """simple docstring"""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number ):
    """simple docstring"""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
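
    # Round-trip example (an illustrative addition): MMXXIV <-> 2024.
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"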
| 130 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
def convert_weight_and_push(hidden_sizes , name , config , save_directory , push_to_hub = True ):
    """simple docstring"""
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s" , pretrained=True )
            else:
                from_model = timm.create_model("levit_128" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        from_output = from_model(x )
        our_output = our_model(x ).logits
        assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory , model_name = None , push_to_hub = True ):
    """simple docstring"""
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
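
    # Example invocation (an illustrative addition; the script filename is assumed):
    #   python convert_levit_to_pytorch.py --model_name levit-128S \
    #       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub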
| 130 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results(test_results ):
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =title
__UpperCamelCase : str =doc_test_results["time_spent"].split(',' )[0]
__UpperCamelCase : str =doc_test_results["success"]
__UpperCamelCase : Tuple =doc_test_results["failures"]
__UpperCamelCase : Optional[Any] =self.n_success + self.n_failures
# Failures and success of the modeling tests
__UpperCamelCase : Optional[Any] =doc_test_results
@property
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =[self._time_spent]
__UpperCamelCase : Optional[Any] =0
for time in time_spent:
__UpperCamelCase : Optional[Any] =time.split(':' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase__ ) == 1:
__UpperCamelCase : List[str] =[0, 0, time_parts[0]]
__UpperCamelCase : Any =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
__UpperCamelCase : Optional[int] =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'{int(lowerCAmelCase__ )}h{int(lowerCAmelCase__ )}m{int(lowerCAmelCase__ )}s'
@property
def __lowercase ( self ):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __lowercase ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def __lowercase ( self ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =40
__UpperCamelCase : Union[str, Any] ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )}
__UpperCamelCase : List[Any] =""
for category, failures in category_failures.items():
if len(lowerCAmelCase__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase__ )
@staticmethod
def __lowercase ( ):
"""simple docstring"""
__UpperCamelCase : int =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(lowerCAmelCase__ )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=lowerCAmelCase__ , )
def __lowercase ( self ):
"""simple docstring"""
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(self.payload )} ) )
__UpperCamelCase : int =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
__UpperCamelCase : int =client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=lowerCAmelCase__ , )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[str] =""
for key, value in failures.items():
__UpperCamelCase : Tuple =value[:200] + " [Truncated]" if len(lowerCAmelCase__ ) > 250 else value
failures_text += f'*{key}*\n_{value}_\n\n'
__UpperCamelCase : List[Any] =job_name
__UpperCamelCase : str ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
__UpperCamelCase : Union[str, Any] ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __lowercase ( self ):
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.' )
__UpperCamelCase : int =self.doc_test_results.pop('job_link' )
self.doc_test_results.pop('failures' )
self.doc_test_results.pop('success' )
self.doc_test_results.pop('time_spent' )
__UpperCamelCase : List[Any] =sorted(self.doc_test_results.items() , key=lambda lowerCamelCase__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['failures'] ):
__UpperCamelCase : Dict =f'*Num failures* :{len(job_result["failed"] )} \n'
__UpperCamelCase : Union[str, Any] =job_result["failures"]
__UpperCamelCase : Optional[Any] =self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__ )
print('Sending the following reply' )
print(json.dumps({'blocks': blocks} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts['ts'] , )
time.sleep(1 )
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
    return {}


def retrieve_artifact(name ):
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(name , file )}.' ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__( self , name ):
            self.name = name
            self.paths = []

        def __str__( self ):
            return self.name

        def add_path( self , path ):
            self.paths.append({'name': self.name, 'path': path} )

    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
A_ :Optional[Any] = get_job_links()
A_ :Optional[Any] = retrieve_available_artifacts()
A_ :int = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A_ :Tuple = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A_ :List[str] = github_actions_job_links.get('''run_doctests''')
A_ :str = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
A_ :Any = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
A_ ,A_ ,A_ :Any = handle_test_results(artifact['''stats'''])
A_ :List[str] = failed
A_ :int = success
A_ :Optional[Any] = time_spent[1:-1] + ''', '''
A_ :Tuple = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
A_ :Union[str, Any] = line.replace('''FAILED ''', '''''')
A_ :Dict = line.split()[0].replace('''\n''', '''''')
if "::" in line:
A_ ,A_ :str = line.split('''::''')
else:
A_ ,A_ :str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A_ :Any = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A_ :Dict = all_failures[test] if test in all_failures else '''N/A'''
A_ :List[Any] = failure
break
A_ :Union[str, Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 367 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name ):
    iam_client = boto3.client('iam' )
    sagemaker_trust_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name ,AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy ,indent=2 ) )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name ,PolicyName=F'{role_name}_policy_permission' ,PolicyDocument=json.dumps(policy_document ,indent=2 ) ,)
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F'role {role_name} already exists. Using existing one' )


def _get_iam_role_arn(role_name ):
    iam_client = boto3.client('iam' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        'How do you want to authorize?' ,['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] ,int ,)
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ' ,default='default' )
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
        aws_access_key_id = _ask_field('AWS Access Key ID: ' )
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field('AWS Secret Access Key: ' )
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field('Enter your AWS Region: [us-east-1]' ,default='us-east-1' )
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' ,['Provide IAM Role name', 'Create new IAM role using credentials'] ,int ,)
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ' )
    else:
        iam_role_name = 'accelerate_sagemaker_execution_role'
        print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
        _create_iam_role_for_sagemaker(iam_role_name )
    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ' ,lambda x: str(x ).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' ,lambda x: str(x ).lower() ,)
    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' ,lambda x: str(x ).lower() ,)
    distributed_type = _ask_options(
        'What is the distributed mode?' ,['No distributed training', 'Data parallelism'] ,_convert_sagemaker_distributed_mode ,)
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo?[yes/NO]:' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
    if use_dynamo:
        prefix = 'dynamo_'
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?' ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?' ,TORCH_DYNAMO_MODES ,lambda x: TORCH_DYNAMO_MODES[int(x )] ,default='default' ,)
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=False ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple ='Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__UpperCamelCase : int =_ask_options(
a_ ,a_ ,lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCamelCase : List[str] =_ask_field(a_ ,lambda a_ : str(a_ ).lower() ,default='ml.p3.2xlarge' )
__UpperCamelCase : Union[str, Any] =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCamelCase : List[str] =_ask_field(
'How many machines do you want use? [1]: ' ,a_ ,default=1 ,)
__UpperCamelCase : Optional[Any] =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' ,['no', 'fp16', 'bf16', 'fp8'] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=a_ ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=a_ ,use_cpu=a_ ,dynamo_config=a_ ,eca_instance_type=a_ ,profile=a_ ,region=a_ ,iam_role_name=a_ ,mixed_precision=a_ ,num_machines=a_ ,sagemaker_inputs_file=a_ ,sagemaker_metrics_file=a_ ,)
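# A minimal sketch of the converter contract relied on by the `_ask_field`
# calls above: a converter maps the raw prompt answer to a typed value and
# raises on bad input. The function below is an illustrative stand-in, not
# accelerate's actual implementation of `_convert_yes_no_to_bool`.
def _convert_yes_no_to_bool_sketch(value: str) -> bool:
    answers = {"yes": True, "no": False}
    if value.lower() not in answers:
        raise ValueError(f"expected yes or no, got {value!r}")
    return answers[value.lower()]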
| 245 | 0 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    """Wraps a Wav2Vec2 feature extractor and a CTC tokenizer into a single processor."""

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backwards compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
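# A short usage sketch for the processor above (added for illustration). It
# assumes `transformers` is installed and the checkpoint is reachable;
# "facebook/wav2vec2-base-960h" is a public checkpoint chosen only as an example.
if __name__ == "__main__":
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    # Audio and transcripts can be processed in a single call; the tokenized
    # text comes back under the "labels" key, as implemented in __call__ above.
    batch = processor(audio=audio, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt")
    print(batch["input_values"].shape, batch["labels"].shape)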
| 278 |
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 278 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _A (__a , __a , __a = "x" , __a = 10**-10 , __a = 1 , ) -> complex:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = symbols(__a )
SCREAMING_SNAKE_CASE_ : Tuple = lambdify(__a , __a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = lambdify(__a , diff(__a , __a ) )
SCREAMING_SNAKE_CASE_ : Dict = starting_point
while True:
if diff_function(__a ) != 0:
SCREAMING_SNAKE_CASE_ : int = prev_guess - multiplicity * func(__a ) / diff_function(
__a )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE_ : Optional[Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
    f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
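# A quick convergence check (an illustrative addition): sqrt(2) is a simple
# root of x**2 - 2, so the default multiplicity of 1 applies.
from math import isclose, sqrt

assert isclose(newton_raphson("x**2 - 2", 1.5), sqrt(2), rel_tol=1e-9)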
| 318 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def _A (__a ) -> Any:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE_ : Optional[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 318 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : str=30 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Any=0.6 , lowerCAmelCase_ : int=None , ) -> Optional[int]:
'''simple docstring'''
A__ : str =parent
A__ : Optional[Any] =batch_size
A__ : Union[str, Any] =image_size
A__ : List[Any] =patch_size
A__ : List[Any] =num_channels
A__ : Optional[int] =is_training
A__ : Dict =use_labels
A__ : int =hidden_size
A__ : List[Any] =num_hidden_layers
A__ : List[str] =num_attention_heads
A__ : List[Any] =intermediate_size
A__ : List[Any] =hidden_act
A__ : Optional[Any] =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Union[str, Any] =type_sequence_label_size
A__ : Union[str, Any] =initializer_range
A__ : List[Any] =mask_ratio
A__ : str =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A__ : Dict =(image_size // patch_size) ** 2
A__ : Tuple =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : List[str] =None
if self.use_labels:
A__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : int =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ : int =ViTMAEModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple =model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
A__ : Optional[int] =ViTMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict =model(lowerCAmelCase_ )
A__ : Dict =(self.image_size // self.patch_size) ** 2
A__ : int =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A__ : Optional[int] =1
A__ : Optional[Any] =ViTMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : str =model(lowerCAmelCase_ )
A__ : str =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowercase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.prepare_config_and_inputs()
A__ , A__ , A__ : Dict =config_and_inputs
A__ : Optional[Any] ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__snake_case = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
A__ : str =ViTMAEModelTester(self )
A__ : Tuple =ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowercase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Dict =model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int =model_class(lowerCAmelCase_ )
A__ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : str =[*signature.parameters.keys()]
A__ : Union[str, Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
def lowercase__ ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
# make masks reproducible
np.random.seed(2 )
A__ : Optional[Any] =int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A__ : List[Any] =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A__ : List[str] =torch.from_numpy(lowerCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A__ : Union[str, Any] =pt_noise
super().check_pt_tf_models(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ , A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[Any] =model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ : Optional[Any] =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
A__ : Union[str, Any] =outputs[0].cpu().numpy()
A__ : List[Any] =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase_ )
A__ : Union[str, Any] =model_class.from_pretrained(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A__ : Union[str, Any] =model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Make sure we don't have nans
A__ : Optional[Any] =after_outputs[0].cpu().numpy()
A__ : Optional[Any] =0
A__ : List[Any] =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCAmelCase_ , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Any ) -> Any:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] =ViTMAEModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A__ : int =ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCAmelCase_ )
A__ : Optional[int] =self.default_image_processor
A__ : Optional[int] =prepare_img()
A__ : Any =image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A__ : Optional[int] =ViTMAEConfig()
A__ : List[str] =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A__ : str =np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A__ : Optional[int] =model(**lowerCAmelCase_ , noise=torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ ) )
# verify the logits
A__ : int =torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
A__ : Any =torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCAmelCase_ ) , atol=1e-4 ) )
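# A standalone check of the mask arithmetic used by the tester above: ViTMAE
# keeps ceil((1 - mask_ratio) * (num_patches + 1)) tokens, the +1 accounting
# for the [CLS] token. With the tester defaults (image_size=30, patch_size=2,
# mask_ratio=0.6) that is 91 of the 226 tokens.
import math as _math

_num_patches = (30 // 2) ** 2  # 225 patches
assert int(_math.ceil((1 - 0.6) * (_num_patches + 1))) == 91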
| 134 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Recursively explore every cell; no memoization, so the time complexity is
    exponential. ``largest_square_area[0]`` tracks the best square side found.

    >>> largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Same recursion as above, but memoized in ``dp_array`` (-1 marks "unseen").

    >>> largest_square_area_in_matrix_top_down_approach_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Iterative bottom-up DP over a (rows + 1) x (cols + 1) table of zeros.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """
    Bottom-up DP keeping only two rows of state.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # Snapshot this row for the row above; a copy is required, since
        # aliasing the same list would corrupt the diagonal/bottom reads.
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
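# A quick cross-check sketch (added for illustration, not part of the original
# file): all four implementations above should agree on any binary matrix.
def _cross_check(rows: int, cols: int, mat: list[list[int]]) -> None:
    results = {
        largest_square_area_in_matrix_top_down_approach(rows, cols, mat),
        largest_square_area_in_matrix_top_down_approach_with_dp(rows, cols, mat),
        largest_square_area_in_matrix_bottom_up(rows, cols, mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat),
    }
    assert len(results) == 1, results


_cross_check(3, 4, [[1, 1, 1, 0], [1, 1, 1, 0], [1, 1, 0, 0]])  # largest square is 2x2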
| 134 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Reads ``{type_path}.source`` / ``{type_path}.target`` line files and
    tokenizes them lazily with ``linecache``."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
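# A small sanity sketch for the metric helpers above (toy strings, purely
# illustrative): normalization strips case, punctuation and articles, and the
# F1 is token-level overlap between prediction and reference.
if __name__ == "__main__":
    assert normalize_answer("The  Cat!") == "cat"
    assert exact_match_score("The Cat", "cat !")
    assert round(f1_score("new york city", "york city area"), 2) == 0.67
    print(calculate_exact_match(["a cat"], ["the cat"]))  # {'em': 1.0}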
| 365 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Project Euler 115: return the least row length n for which the number of
    ways to fill the row (the fill-count function) first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
| 201 | 0 |
"""simple docstring"""
class a :
def __init__( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: dict[str, TrieNode] ={} # Mapping from char to TrieNode
SCREAMING_SNAKE_CASE_: int =False
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
for word in words:
self.insert(_lowerCamelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self
for char in word:
if char not in curr.nodes:
SCREAMING_SNAKE_CASE_: Optional[int] =TrieNode()
SCREAMING_SNAKE_CASE_: int =curr.nodes[char]
SCREAMING_SNAKE_CASE_: int =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self
for char in word:
if char not in curr.nodes:
return False
SCREAMING_SNAKE_CASE_: Tuple =curr.nodes[char]
return curr.is_leaf
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : int ) -> Tuple:
'''simple docstring'''
def _delete(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Any ) -> bool:
if index == len(_lowerCamelCase ):
# If word does not exist
if not curr.is_leaf:
return False
SCREAMING_SNAKE_CASE_: List[Any] =False
return len(curr.nodes ) == 0
SCREAMING_SNAKE_CASE_: List[Any] =word[index]
SCREAMING_SNAKE_CASE_: Union[str, Any] =curr.nodes.get(_lowerCamelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
SCREAMING_SNAKE_CASE_: Dict =_delete(_lowerCamelCase , _lowerCamelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _lowerCamelCase , 0 )
def __magic_name__ ( lowercase , lowercase ):
if node.is_leaf:
print(lowercase , end=""" """ )
for key, value in node.nodes.items():
print_words(lowercase , word + key )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""banana bananas bandana band apple all beast""".split()
SCREAMING_SNAKE_CASE_: Dict =TrieNode()
root.insert_many(lowercase )
# print_words(root, "")
assert all(root.find(lowercase ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def __magic_name__ ( lowercase , lowercase ):
print(str(lowercase ) , """works!""" if passes else """doesn't work :(""" )
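# A small extension sketch (not in the original file): collect every stored
# word under a given prefix by walking the child map, mirroring print_words.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]

    matches: list[str] = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            matches.append(word)
        for key, child in node.nodes.items():
            _collect(child, word + key)

    _collect(curr, prefix)
    return matches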
def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 173 |
"""simple docstring"""
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> bool:
UpperCAmelCase__ : Any = len(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
UpperCAmelCase__ : int = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
UpperCAmelCase__ : Dict = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
UpperCAmelCase__ : str = subset[i - 1][j]
if arr[i - 1] <= j:
UpperCAmelCase__ : Optional[int] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
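# A space-optimized variant sketch (an illustrative alternative with the same
# semantics as is_sum_subset): one boolean row of size required_sum + 1,
# updated right-to-left so each element is used at most once.
def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset sums to zero
    for value in arr:
        for total in range(required_sum, value - 1, -1):
            reachable[total] = reachable[total] or reachable[total - value]
    return reachable[required_sum]


assert is_sum_subset_1d([2, 4, 6, 8], 14) and not is_sum_subset_1d([2, 4, 6, 8], 5)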
| 171 | 0 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``n`` (Project Euler 21)."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
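# The classic amicable pair as a quick check of sum_of_divisors:
# d(220) = 284 and d(284) = 220.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220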
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 222 |
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using floating-point sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
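# A quick agreement check between the two predicates above. The float-based
# perfect_square can misjudge very large inputs due to rounding, which is why
# the integer binary search is the more robust of the two.
for _n in (0, 1, 2, 16, 36, 99, 10**6):
    assert perfect_square_binary_search(_n) == (round(_n**0.5) ** 2 == _n)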
if __name__ == "__main__":
import doctest
doctest.testmod() | 222 | 1 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCamelCase__ :
def __init__( self :Optional[Any] , _A :Optional[int] , _A :Optional[Any]=99 , _A :Optional[int]=13 , _A :int=16 , _A :str=7 , _A :List[Any]=True , _A :List[Any]=True , _A :Any=True , _A :List[str]=False , _A :int=True , _A :Union[str, Any]=2 , _A :List[str]=32 , _A :Optional[Any]=4 , _A :int=4 , _A :int=30 , _A :Dict=0 , _A :List[str]=1 , _A :Union[str, Any]=2 , _A :int=None , ) -> Optional[int]:
'''simple docstring'''
__A = parent
__A = batch_size
__A = decoder_seq_length
# For common tests
__A = self.decoder_seq_length
__A = is_training
__A = use_attention_mask
__A = use_labels
__A = vocab_size
__A = d_model
__A = d_model
__A = decoder_layers
__A = decoder_layers
__A = decoder_ffn_dim
__A = decoder_attention_heads
__A = decoder_attention_heads
__A = eos_token_id
__A = bos_token_id
__A = pad_token_id
__A = decoder_start_token_id
__A = use_cache
__A = max_position_embeddings
__A = None
__A = decoder_seq_length
__A = 2
__A = 1
def lowercase_ ( self :List[str] ) -> Any:
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__A = None
if self.use_attention_mask:
__A = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__A = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase_ ( self :int , _A :Tuple , _A :Union[str, Any] , _A :Dict , _A :Any , ) -> int:
'''simple docstring'''
__A = True
__A = TrOCRDecoder(config=_A ).to(_A ).eval()
__A = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__A = model(_A , use_cache=_A )
__A = model(_A )
__A = model(_A , use_cache=_A )
self.parent.assertTrue(len(_A ) == len(_A ) )
self.parent.assertTrue(len(_A ) == len(_A ) + 1 )
__A = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
__A = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__A = torch.cat([input_ids, next_tokens] , dim=-1 )
__A = model(_A )['last_hidden_state']
__A = model(_A , past_key_values=_A )['last_hidden_state']
# select random slice
__A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__A = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__A = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_A , _A , atol=1E-3 )
def lowercase_ ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__A = self.prepare_config_and_inputs()
__A , __A , __A , __A = config_and_inputs
__A = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
UpperCAmelCase__ : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Dict = False
def lowercase_ ( self :Any ) -> List[str]:
'''simple docstring'''
__A = TrOCRStandaloneDecoderModelTester(self , is_training=_A )
__A = ConfigTester(self , config_class=_A )
def lowercase_ ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self :List[Any] ) -> Dict:
'''simple docstring'''
pass
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
pass
def lowercase_ ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_A )
def lowercase_ ( self :Union[str, Any] ) -> Any:
'''simple docstring'''
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase_ ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
pass
| 161 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = DebertaVaTokenizer
_a : Optional[Any] = DebertaVaTokenizerFast
_a : List[str] = True
_a : Optional[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_A ) , 3_0_0_0_1 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        # keep_accents=True is inferred: "é" survives tokenization below
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = rust_tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = rust_tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 92 | 0 |
# every letter maps to a unique five-symbol code; "j" and "v" intentionally
# deviate from the classical Baconian cipher so that decoding is unambiguous
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word with the Baconian cipher.

    >>> encode("ab")
    'AAAAAAAAAB'
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher string made of five-symbol A/B groups.

    >>> decode("AAAAAAAAAB")
    'ab'
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    from doctest import testmod

    testmod()
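    # Illustrative round trip (an added sketch, not part of the original module).
    # encode() lowercases its input, so the round trip returns lowercase text;
    # the space character is passed through, keeping word boundaries intact.
    secret = encode("attack at dawn")
    print(secret)
    print(decode(secret))  # -> "attack at dawn"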
| 360 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several map-style or iterable datasets into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 348 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path: str, pytorch_dump_folder_path: str):
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
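    # Hypothetical invocation (the script and file names below are placeholders,
    # not taken from this source):
    #     python convert_xlm_checkpoint.py \
    #         --xlm_checkpoint_path ./mlm_en_2048.pth \
    #         --pytorch_dump_folder_path ./converted-xlm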
| 85 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments for the model used during self-training."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments for the data used during self-training."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments for the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations to run."}
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Turn model predictions on the unlabeled set into a pseudo-labeled training file."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training starting from a pretrained model."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(args.output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(args.output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(args.output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(args.output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(args.output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(args.output_dir, "eval_results_best-iteration.json"),
            )
| 81 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # insert at the head of the list
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the payloads rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 317 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 317 | 1 |
import math
def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its positive root is sqrt(a)
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Approximate sqrt(a) with the Newton-Raphson iteration."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
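    # Quick hedged demo of the Newton-Raphson helper defined above; the
    # iteration converges quadratically once it is near the root.
    print(f"square_root_iterative(2) = {square_root_iterative(2)}")  # ~1.41421356...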
| 275 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
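# Hedged usage sketch for the processor above; the image path is a placeholder:
#
#     from PIL import Image
#     processor = DPTImageProcessor(size={"height": 384, "width": 384}, ensure_multiple_of=32)
#     image = Image.open("example.jpg")
#     batch = processor(images=image, return_tensors="pt")
#     print(batch["pixel_values"].shape)  # (1, 3, H, W), H and W multiples of 32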
| 275 | 1 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def __str__(self):
        return self.version_str
def _str_to_version_tuple(version_str):
    """Convert a version string like "1.2.3" into a tuple of ints."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Convert a version tuple back into a string."""
    return ".".join(str(v) for v in version_tuple)
| 203 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) from a single job entry."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Fetch all jobs of a GitHub Actions workflow run and compute their running times."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 203 | 1 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
_A : Union[str, Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_A : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 202 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 202 | 1 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum all numbers that can be written as the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 360 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
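# Hypothetical invocation (the script name and output path are placeholders;
# model names come from the model_name_to_url table above):
#     python convert_focalnet_checkpoint.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny-converted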
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # keep_accents=True inferred from the accented test strings below
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def _a (self , lowercase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
A_ : int = """This is a simple input"""
A_ : Dict = ["""This is a simple input 1""", """This is a simple input 2"""]
A_ : Optional[Any] = ("""This is a simple input""", """This is a pair""")
A_ : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
def _a (self ):
pass
def _a (self ):
tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
A_ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [285, 46, 10, 170, 382] , )
A_ : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
A_ : Tuple = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
A_ : Optional[int] = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _a (self ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def _a (self ):
A_ : Tuple = """Hello World!"""
A_ : Tuple = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@slow
def _a (self ):
A_ : Tuple = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
A_ : Tuple = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase ) )
@require_torch
@slow
def _a (self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A_ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
A_ : List[str] = """ """.join(__lowerCAmelCase )
A_ : Dict = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="""pt""" )
A_ : str = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
A_ : Union[str, Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A_ : Dict = encoded_sequence["""input_ids"""].shape
A_ : Tuple = ReformerModel(__lowerCAmelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCAmelCase )
model(**__lowerCAmelCase )
@slow
def _a (self ):
# fmt: off
A_ : Union[str, Any] = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A_ : Optional[Any] = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=__lowerCAmelCase , sequences=__lowerCAmelCase , ) | 206 | """simple docstring"""
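The comment in the Reformer model test above notes that padded inputs require adjusting the axial position encodings. A minimal sketch of that constraint: the two axial factors must multiply out to the (padded) sequence length. The concrete shape and length values here are assumptions, not the test's.

from transformers import ReformerConfig

config = ReformerConfig()
padded_len = 4096  # assumed padded input length
config.axial_pos_shape = (64, padded_len // 64)  # the factors must cover the full length
assert config.axial_pos_shape[0] * config.axial_pos_shape[1] == padded_len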
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ):
_UpperCAmelCase = recall_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , )
return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
| 289 | 0 |
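To make the docstring's equation Recall = TP / (TP + FN) concrete, here is a hedged from-scratch sketch for the binary case; `recall_from_scratch` is an illustrative helper, not part of the metric.

def recall_from_scratch(references, predictions, pos_label=1):
    # TP: positives the model labeled positive; FN: positives it missed.
    tp = sum(1 for r, p in zip(references, predictions) if r == pos_label and p == pos_label)
    fn = sum(1 for r, p in zip(references, predictions) if r == pos_label and p != pos_label)
    return tp / (tp + fn) if (tp + fn) else 0.0

# Matches Example 1 above: 2 of the 3 positive references are recovered.
assert abs(recall_from_scratch([0, 0, 1, 1, 1], [0, 1, 0, 1, 1]) - 2 / 3) < 1e-9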
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
A : Dict = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
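A minimal instantiation sketch for the config above, checking the stage defaults assigned in `__init__`:

config = ConvNextV2Config()
assert config.hidden_sizes == [96, 192, 384, 768]
assert config.depths == [3, 3, 9, 3]
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]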
| 351 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
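A hedged migration sketch for the deprecation above: swap the old class for `CLIPImageProcessor`; the call signature stays the same.

from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")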
| 146 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = OpenAIGPTTokenizer
A_ : Optional[Any] = OpenAIGPTTokenizerFast
A_ : Optional[int] = True
A_ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__A = dict(zip(_lowerCamelCase, range(len(_lowerCamelCase ) ) ) )
__A = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
__A = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file, '''w''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ):
'''simple docstring'''
return "lower newer", "lower newer"
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)

input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : List[str]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
__A = '''This is a simple input'''
__A = ['''This is a simple input 1''', '''This is a simple input 2''']
__A = ('''This is a simple input''', '''This is a pair''')
__A = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_lowerCamelCase, tokenizer_r.encode, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' )
# Simple input
self.assertRaises(_lowerCamelCase, tokenizer_r.encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' )
# Simple input
self.assertRaises(
_lowerCamelCase, tokenizer_r.batch_encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''', )
# Pair input
self.assertRaises(_lowerCamelCase, tokenizer_r.encode, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' )
# Pair input
self.assertRaises(_lowerCamelCase, tokenizer_r.encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''' )
# Pair input
self.assertRaises(
_lowerCamelCase, tokenizer_r.batch_encode_plus, _lowerCamelCase, max_length=_lowerCamelCase, padding='''max_length''', )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
pass
| 266 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if not postfix_notation:
return 0
__A = {'''+''', '''-''', '''*''', '''/'''}
__A = []
for token in postfix_notation:
if token in operations:
__A , __A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(__UpperCamelCase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
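The evaluator assumes its input is already in postfix order. As a hedged companion sketch (an addition, not in the original file), a minimal shunting-yard conversion for left-associative operators without parentheses:

def infix_to_postfix(tokens: list) -> list:
    # Illustrative helper; handles only +, -, *, / and no parentheses.
    precedence = {"+": 1, "-": 1, "*": 2, "/": 2}
    output, ops = [], []
    for token in tokens:
        if token in precedence:
            while ops and precedence[ops[-1]] >= precedence[token]:
                output.append(ops.pop())
            ops.append(token)
        else:
            output.append(token)
    output.extend(reversed(ops))
    return output


assert infix_to_postfix(["2", "+", "1", "*", "3"]) == ["2", "1", "3", "*", "+"]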
| 266 | 1 |
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words):
        for word in words:
            self.insert(word)

    def insert(self, word):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
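A natural extension the file does not include is prefix search; below is a hedged sketch reusing the same child-map walk as `print_words` (the helper name is an invention):

def words_with_prefix(root: TrieNode, prefix: str) -> list:
    # Walk down to the node for `prefix`, then collect every leaf below it.
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []
        curr = curr.nodes[char]
    results = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for key, value in node.nodes.items():
            _collect(value, word + key)

    _collect(curr, prefix)
    return results

# With the test words above, words_with_prefix(root, "ban") would yield
# "banana", "bananas", "bandana", and "band".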
| 355 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_input_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_labels
a = num_choices
a = scope
def UpperCamelCase_ (self ):
"""simple docstring"""
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = None
if self.use_input_mask:
a = random_attention_mask([self.batch_size, self.seq_length] )
a = None
if self.use_token_type_ids:
a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a = None
a = None
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a = ids_tensor([self.batch_size] , self.num_choices )
a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ (self ):
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = LlamaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = True
a = LlamaModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = True
a = True
a = LlamaForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# first forward pass
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([input_mask, next_mask] , dim=-1 )
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["hidden_states"][0]
a = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["hidden_states"][0]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__A = (LlamaForCausalLM,) if is_torch_available() else ()
__A = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = LlamaModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "single_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = 3
a = "multi_label_classification"
a = input_dict["input_ids"]
a = input_ids.ne(1 ).to(lowerCamelCase_ )
a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a = LlamaForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = ids_tensor([1, 10] , config.vocab_size )
a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = LlamaModel(lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
original_model.eval()
a = original_model(lowerCamelCase_ ).last_hidden_state
a = original_model(lowerCamelCase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a = {"type": scaling_type, "factor": 10.0}
a = LlamaModel(lowerCamelCase_ )
scaled_model.to(lowerCamelCase_ )
scaled_model.eval()
a = scaled_model(lowerCamelCase_ ).last_hidden_state
a = scaled_model(lowerCamelCase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-5 ) )
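For reference, a hedged sketch of the `rope_scaling` dict the parameterized test sweeps over; in recent transformers versions LlamaConfig validates exactly this {"type", "factor"} shape, and the factor value is just the one used above.

from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
# A model built from this config rescales its rotary embeddings so it can
# run past config.max_position_embeddings.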
@require_torch
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
a = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
a = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
a = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
# Expected mean on dim = -1
a = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
a = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = [1, 306, 4658, 278, 6593, 310, 2834, 338]
a = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
a = model(torch.tensor(lowerCamelCase_ ) )
a = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowerCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
a = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowerCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
a = "Simply put, the theory of relativity states that "
a = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
a = tokenizer.encode(lowerCamelCase_ , return_tensors="pt" )
a = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowerCamelCase_ )
# greedy generation outputs
a = model.generate(lowerCamelCase_ , max_new_tokens=64 , top_p=lowerCamelCase_ , temperature=1 , do_sample=lowerCamelCase_ )
a = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 71 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = CLIPTokenizer
__SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizerFast
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = {}
__SCREAMING_SNAKE_CASE : Tuple = False
def a ( self ):
super().setUp()
# fmt: off
snake_case_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
snake_case_ = dict(zip(__A , range(len(__A ) ) ) )
snake_case_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
snake_case_ = {"unk_token": "<unk>"}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def a ( self , **snake_case ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__A )
def a ( self , **snake_case ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__A )
def a ( self , snake_case ):
snake_case_ = "lower newer"
snake_case_ = "lower newer"
return input_text, output_text
def a ( self ):
snake_case_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ = "lower newer"
snake_case_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
snake_case_ = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@require_ftfy
def a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
snake_case_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
snake_case_ = tokenizer_s.tokenize(__A )
snake_case_ = tokenizer_r.tokenize(__A )
self.assertListEqual(__A , __A )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case_ = "xa\u0303y" + " " + "x\xe3y"
snake_case_ = tokenizer_s.tokenize(__A )
snake_case_ = tokenizer_r.tokenize(__A )
self.assertListEqual(__A , __A )
# Test that the tokenization is identical on unicode of space type
snake_case_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case_ = tokenizer_s.tokenize(__A )
snake_case_ = tokenizer_r.tokenize(__A )
self.assertListEqual(__A , __A )
# Test that the tokenization is identical on unicode of line break type
snake_case_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case_ = tokenizer_s.tokenize(__A )
snake_case_ = tokenizer_r.tokenize(__A )
self.assertListEqual(__A , __A )
def a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case_ = F'''{text_of_1_token} {text_of_1_token}'''
snake_case_ = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , )
snake_case_ = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__A ) + 1, len(__A ) + 1 + len(__A )) , )
snake_case_ = F''' {text}'''
snake_case_ = self.rust_tokenizer_class.from_pretrained(
__A , use_fast=__A , )
snake_case_ = tokenizer_r(__A , return_offsets_mapping=__A , add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) , )
def a ( self ):
with self.assertRaises(__A ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def a ( self ):
super().test_tokenization_python_rust_equals()
def a ( self ):
pass
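A hedged sketch of the offset-mapping behavior the test above asserts: each fast-tokenizer token carries a (start, end) character span into the raw string.

from transformers import CLIPTokenizerFast

tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
text = "hello hello"
enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
for (start, end), token_id in zip(enc["offset_mapping"], enc["input_ids"]):
    print((start, end), repr(text[start:end]), token_id)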
| 285 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=DummyObject ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 0 |
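A hedged illustration of what the dummy pattern above buys: the import succeeds even when flax is not installed, and a helpful error fires only on first use. The pipeline name and checkpoint id are assumed examples.

try:
    from diffusers import FlaxStableDiffusionPipeline  # assumed dummy-backed name

    FlaxStableDiffusionPipeline.from_pretrained("some/checkpoint")  # placeholder id
except ImportError as err:
    print(err)  # explains that the flax backend is required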
'''simple docstring'''
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 354 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : Union[str, Any] = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
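A hedged sketch of the effect of the `_LazyModule` indirection above: the heavy submodule import is deferred until an attribute is first touched.

import importlib

herbert = importlib.import_module("transformers.models.herbert")
tokenizer_cls = herbert.HerbertTokenizer  # the real import happens here, lazily
print(tokenizer_cls.__name__)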
| 21 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
def __init__( self , *snake_case , **snake_case ):
super().__init__(*snake_case , **snake_case )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , **snake_case , ):
lowercase = {}
if truncation is not None:
lowercase = truncation
lowercase = generate_kwargs
lowercase = {}
if return_tensors is not None and return_type is None:
lowercase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase = return_type
if clean_up_tokenization_spaces is not None:
lowercase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase = self.tokenizer.encode(snake_case , add_special_tokens=snake_case )
if len(snake_case ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
lowercase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
return True
def _parse_and_tokenize(self, *args, truncation):
    prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
    if isinstance(args[0], list):
        if self.tokenizer.pad_token_id is None:
            raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
        args = ([prefix + arg for arg in args[0]],)
        padding = True
    elif isinstance(args[0], str):
        args = (prefix + args[0],)
        padding = False
    else:
        raise ValueError(
            f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`")
    inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
    # This is produced by tokenizers but is an invalid generate kwargs
    if "token_type_ids" in inputs:
        del inputs["token_type_ids"]
    return inputs
def __call__( self , *snake_case , **snake_case ):
lowercase = super().__call__(*snake_case , **snake_case )
if (
isinstance(args[0] , snake_case )
and all(isinstance(snake_case , snake_case ) for el in args[0] )
and all(len(snake_case ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case ):
lowercase = self._parse_and_tokenize(snake_case , truncation=snake_case , **snake_case )
return inputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case , **snake_case ):
if self.framework == "pt":
lowercase , lowercase = model_inputs['input_ids'].shape
elif self.framework == "tf":
lowercase , lowercase = tf.shape(model_inputs['input_ids'] ).numpy()
lowercase = generate_kwargs.get('min_length' , self.model.config.min_length )
lowercase = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(snake_case , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
lowercase = self.model.generate(**snake_case , **snake_case )
lowercase = output_ids.shape[0]
if self.framework == "pt":
lowercase = output_ids.reshape(snake_case , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase = tf.reshape(snake_case , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=ReturnType.TEXT , snake_case=False ):
lowercase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase = {
F'''{self.return_name}_text''': self.tokenizer.decode(
snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
}
records.append(snake_case )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"
def __call__( self , *snake_case , **snake_case ):
return super().__call__(*snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(__lowerCamelCase )
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = """translation"""
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case ):
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def SCREAMING_SNAKE_CASE__ ( self , *snake_case , snake_case=TruncationStrategy.DO_NOT_TRUNCATE , snake_case=None , snake_case=None ):
if getattr(self.tokenizer , '_build_translation_inputs' , snake_case ):
return self.tokenizer._build_translation_inputs(
*snake_case , return_tensors=self.framework , truncation=snake_case , src_lang=snake_case , tgt_lang=snake_case )
else:
return super()._parse_and_tokenize(*snake_case , truncation=snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case=None , snake_case=None , **snake_case ):
lowercase , lowercase , lowercase = super()._sanitize_parameters(**snake_case )
if src_lang is not None:
lowercase = src_lang
if tgt_lang is not None:
lowercase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase = kwargs.get('task' , self.task )
lowercase = task.split('_' )
if task and len(snake_case ) == 4:
# translation, XX, to YY
lowercase = items[1]
lowercase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *snake_case , **snake_case ):
return super().__call__(*snake_case , **snake_case )
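# Minimal sketch of the task-name fallback in _sanitize_parameters above,
# assuming the usual "translation_{src}_to_{tgt}" naming convention:
#
#     task = "translation_en_to_fr"
#     items = task.split("_")              # ["translation", "en", "to", "fr"]
#     if len(items) == 4:
#         src_lang, tgt_lang = items[1], items[3]   # "en", "fr"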
| 195 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : str = ["""pixel_values"""]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 255 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ):
super().__init__(**snake_case )
lowercase = size if size is not None else {'shortest_edge': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowercase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
lowercase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(snake_case , param_name='size' , default_to_square=snake_case )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
lowercase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
lowercase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
lowercase = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
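# Hypothetical usage sketch for the image processor above (the class is
# called ImageProcessor here for readability; size and file name are
# assumptions):
#
#     from PIL import Image
#     processor = ImageProcessor(size={"shortest_edge": 224})
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     batch["pixel_values"].shape   # -> torch.Size([1, 3, 224, 224])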
| 195 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowerCamelCase__ ( snake_case ):
def __init__( self ,*A ,**A ):
super().__init__(*A ,**A )
UpperCAmelCase = {}
def _UpperCamelCase ( self ,A ,*A ,**A ):
UpperCAmelCase = super().add_tokens(A ,*A ,**A )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def _UpperCamelCase ( self ,A ,*A ,A=1 ,**A ):
UpperCAmelCase = []
if num_vec_per_token == 1:
self.try_adding_tokens(A ,*A ,**A )
output.append(A )
else:
UpperCAmelCase = []
for i in range(A ):
UpperCAmelCase = placeholder_token + F'''_{i}'''
self.try_adding_tokens(A ,*A ,**A )
output.append(A )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
F''' {placeholder_token}. Keep placeholder tokens independent.''' )
UpperCAmelCase = output
def _UpperCamelCase ( self ,A ,A=False ,A=1.0 ):
if isinstance(A ,A ):
UpperCAmelCase = []
for i in range(len(A ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=A ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
UpperCAmelCase = self.token_map[placeholder_token]
UpperCAmelCase = tokens[: 1 + int(len(A ) * prop_tokens_to_load )]
if vector_shuffle:
UpperCAmelCase = copy.copy(A )
random.shuffle(A )
UpperCAmelCase = text.replace(A ,""" """.join(A ) )
return text
def __call__( self ,A ,*A ,A=False ,A=1.0 ,**A ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
A ,vector_shuffle=A ,prop_tokens_to_load=A ) ,*A ,**A ,)
def _UpperCamelCase ( self ,A ,*A ,A=False ,A=1.0 ,**A ):
return super().encode(
self.replace_placeholder_tokens_in_text(
A ,vector_shuffle=A ,prop_tokens_to_load=A ) ,*A ,**A ,)
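# Illustrative sketch (hypothetical names) of the multi-vector expansion
# above: with num_vec_per_token=3, one placeholder is registered as three
# sub-tokens that later replace it in the prompt text.
#
#     placeholder = "<cat>"
#     tokens = [placeholder + f"_{i}" for i in range(3)]
#     "a photo of <cat>".replace(placeholder, " ".join(tokens))
#     # -> "a photo of <cat>_0 <cat>_1 <cat>_2"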
| 234 |
"""simple docstring"""
_UpperCamelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCamelCase = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert len(str(_snake_case ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
UpperCAmelCase = year // 100
UpperCAmelCase = (5 * (century % 4) + 2) % 7
UpperCAmelCase = year % 100
UpperCAmelCase = centurian % 12
UpperCAmelCase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCAmelCase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)  # century years are leap years only when divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
UpperCAmelCase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
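# Worked example of the algorithm above (with the corrected leap-year test)
# for 2020-10-24: century=20, century_anchor=(5*(20%4)+2)%7=2, centurian=20,
# centurian_m=8, dooms_day=(1+8+2+2)%7=6, day_anchor=DOOMSDAY_LEAP[9]=3,
# week_day=(6+24-3)%7=6 -> "Saturday".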
| 234 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=0 ) -> Tuple:
# Format the message.
if name is None:
A__ = None
else:
A__ = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
A__ = fmt.format(lowercase_ )
# Print and recurse (if needed).
if isinstance(lowercase_ , lowercase_ ):
if msg is not None:
print(lowercase_ )
for k in val.keys():
recursive_print(lowercase_ , val[k] , spaces + 2 )
elif isinstance(lowercase_ , torch.Tensor ):
print(lowercase_ , ":" , val.size() )
else:
print(lowercase_ , ":" , lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
A__ = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
A__ = (num_heads, hidden_size, num_splits) + input_shape[1:]
A__ = param.view(*lowercase_ )
A__ = param.transpose(0 , 2 )
A__ = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
A__ = (num_heads, num_splits, hidden_size) + input_shape[1:]
A__ = param.view(*lowercase_ )
A__ = param.transpose(0 , 1 ).contiguous()
A__ = param.view(*lowercase_ )
return param
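# Shape sketch for the permutation above, with hypothetical sizes
# num_heads=2, hidden_size=4, num_splits=3 and checkpoint_version >= 2.0:
# a fused QKV weight of shape [2*3*4, D] = [24, D] is viewed as
# [2, 3, 4, D], transposed to [3, 2, 4, D], then viewed back to [24, D],
# making the three Q/K/V splits contiguous along the first dimension.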
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
# The converted output model.
A__ = {}
# old versions did not store training args
A__ = input_state_dict.get("args" , lowercase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
A__ = ds_args.padded_vocab_size
A__ = ds_args.max_position_embeddings
A__ = ds_args.hidden_size
A__ = ds_args.num_layers
A__ = ds_args.num_attention_heads
A__ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
A__ = config.n_head
# The hidden_size per head.
A__ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
A__ = input_state_dict["checkpoint_version"]
else:
A__ = 0.0
# The model.
A__ = input_state_dict["model"]
# The language model.
A__ = model["language_model"]
# The embeddings.
A__ = lm["embedding"]
# The word embeddings.
A__ = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
A__ = word_embeddings[: config.vocab_size, :]
A__ = word_embeddings
# The position embeddings.
A__ = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
A__ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
A__ = pos_embeddings
# The transformer.
A__ = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
A__ = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
A__ = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
A__ = layer_re.match(lowercase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
A__ = int(m.group(1 ) )
# The name of the operation.
A__ = m.group(2 )
# Is it a weight or a bias?
A__ = m.group(3 )
# The name of the layer.
A__ = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
A__ = "ln_1" if op_name.startswith("input" ) else "ln_2"
A__ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
A__ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowercase_ , lowercase_ )
A__ = causal_mask
# Insert a "dummy" tensor for masked_bias.
A__ = torch.tensor(-1E4 , dtype=torch.floataa )
A__ = masked_bias
A__ = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
A__ = out_val.transpose(0 , 1 ).contiguous()
# Store.
A__ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
A__ = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ )
# Store. No change of shape.
A__ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
A__ = megatron_to_transformers[op_name]
A__ = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
A__ = megatron_to_transformers[op_name]
A__ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
A__ = transformer["final_layernorm.weight"]
A__ = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
A__ = word_embeddings
# It should be done!
return output_state_dict
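# Example of the layer-name rewrite above (hypothetical key): the Megatron
# key "layers.0.self_attention.dense.weight" matches layer_re with groups
# ("0", "self_attention.dense", "weight"), maps through
# megatron_to_transformers to ".attn.c_proj." and, after the weight
# transpose, is stored as "transformer.h.0.attn.c_proj.weight".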
def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# Create the argument parser.
A__ = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=lowercase_ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=lowercase_ , help="An optional config json file describing the pre-trained model." , )
A__ = parser.parse_args()
# Extract the basename.
A__ = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
A__ = torch.load(lowercase_ , map_location="cpu" )
else:
A__ = torch.load(args.path_to_checkpoint , map_location="cpu" )
A__ = input_state_dict.get("args" , lowercase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
A__ = "gelu_fast"
elif ds_args.openai_gelu:
A__ = "gelu_new"
else:
A__ = "gelu"
else:
# in the very early days this used to be "gelu_new"
A__ = "gelu_new"
# Spell out all parameters in case the defaults change.
A__ = GPTaConfig(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowercase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=lowercase_ , summary_activation=lowercase_ , summary_proj_to_labels=lowercase_ , summary_first_dropout=0.1 , scale_attn_weights=lowercase_ , use_cache=lowercase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
A__ = GPTaConfig.from_json_file(args.config_file )
A__ = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
A__ = convert_megatron_checkpoint(lowercase_ , lowercase_ , lowercase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowercase_ , lowercase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
A__ = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
A__ = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
A__ = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
A__ = "gpt2"
A__ = AutoTokenizer.from_pretrained(lowercase_ )
A__ = type(lowercase_ ).__name__
A__ = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(lowercase_ )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(lowercase_ )
# Store the state_dict to file.
A__ = os.path.join(lowercase_ , "pytorch_model.bin" )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(lowercase_ , lowercase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 247 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
lowercase__ = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
@dataclass
class UpperCAmelCase_ :
lowercase__ = field(default=A_, metadata={'''help''': '''The input training data file (a text file).'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
lowercase__ = field(
default=A_, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ = field(
default=A_, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
lowercase__ = field(
default=A_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase_ :
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
def __call__( self : Optional[Any] , snake_case_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(snake_case_ ) for feature in features]
A__ = len(snake_case_ )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
A__ = list(chain(*snake_case_ ) )
A__ = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
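# Shape sketch for the collator above (hypothetical sizes): batch_size=2
# examples with num_choices=4 are flattened to 8 sequences, tokenizer.pad
# returns input_ids of shape [8, seq_len], and the un-flatten step views
# them back to [2, 4, seq_len]; labels become an int64 tensor of shape [2].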
def _SCREAMING_SNAKE_CASE ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__, A__, A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__, A__, A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
datasets.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
lowercase_ , data_files=lowercase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [f"""ending{i}""" for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase_ ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase_ )
]
# Flatten out
A__ = list(chain(*lowercase_ ) )
A__ = list(chain(*lowercase_ ) )
# Tokenize
A__ = tokenizer(
lowercase_ , lowercase_ , truncation=lowercase_ , max_length=lowercase_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(lowercase_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(lowercase_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
lowercase_ , batched=lowercase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase_ ):
A__, A__ = eval_predictions
A__ = np.argmax(lowercase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase_ , data_collator=lowercase_ , compute_metrics=lowercase_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase_ )
)
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("train" , lowercase_ )
trainer.save_metrics("train" , lowercase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase_ )
A__ = min(lowercase_ , len(lowercase_ ) )
trainer.log_metrics("eval" , lowercase_ )
trainer.save_metrics("eval" , lowercase_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
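# Hypothetical invocation sketch (model name and paths are assumptions):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --output_dir /tmp/swag_out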
| 247 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionXLImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowercase_ = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
lowercase_ = CLIPTextModel(UpperCAmelCase )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase )
lowercase_ = CLIPTextModelWithProjection(UpperCAmelCase )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=UpperCAmelCase )
lowercase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> List[str]:
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowercase_ = image / 2 + 0.5
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def A__ ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
# forward without prompt embeds
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
lowercase_ = 3 * ["this is a negative prompt"]
lowercase_ = negative_prompt
lowercase_ = 3 * [inputs["prompt"]]
lowercase_ = sd_pipe(**UpperCAmelCase )
lowercase_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
lowercase_ = 3 * ["this is a negative prompt"]
lowercase_ = 3 * [inputs.pop("prompt" )]
( lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) = sd_pipe.encode_prompt(UpperCAmelCase , negative_prompt=UpperCAmelCase )
lowercase_ = sd_pipe(
**UpperCAmelCase , prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , pooled_prompt_embeds=UpperCAmelCase , negative_pooled_prompt_embeds=UpperCAmelCase , )
lowercase_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , UpperCAmelCase , UpperCAmelCase="cpu" , UpperCAmelCase=torch.floataa , UpperCAmelCase=0 ) -> str:
'''simple docstring'''
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = np.random.RandomState(UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
lowercase_ = torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase , dtype=UpperCAmelCase )
lowercase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_inputs(UpperCAmelCase )
lowercase_ = pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase_ = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 297 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turtle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 297 | 1 |
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
if not (isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
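# Worked example of the DP above: for "ababc" and "abcba" the table peaks
# at dp[5][3] == 3 (both suffixes end in "abc"), so ans_length=3 and
# ans_index=5, and the function returns "ababc"[5 - 3 : 5] == "abc".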
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
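# Minimal sketch of the lazy-import pattern used above (hypothetical class,
# not the transformers implementation): nothing is imported at module load;
# attribute access triggers the real submodule import.
#
#     import importlib
#     class LazyModule:
#         def __init__(self, name, structure):
#             self._name, self._structure = name, structure
#         def __getattr__(self, attr):
#             for submodule, names in self._structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f".{submodule}", self._name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)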
| 165 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ldm3d_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1 = output.rgb[0, -3:, -3:, -1]
        depth_slice_1 = output.depth[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2 = output.rgb[0, -3:, -3:, -1]
        depth_slice_2 = output.depth[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 154 | 0 |
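The pipeline tests above all follow one recipe: seed every source of randomness, run a short generation, then compare a small slice of the output against frozen reference values. A self-contained sketch of that assertion pattern (my own illustration — the "pipeline" here is a stand-in function, not a real diffusers object; the slicing and tolerance mirror the tests above):

import numpy as np


def fake_pipeline(seed: int) -> np.ndarray:
    # stand-in for an expensive pipeline call; seeding makes it reproducible
    rng = np.random.RandomState(seed)
    return rng.standard_normal((1, 64, 64, 3))


output = fake_pipeline(seed=0)
image_slice = output[0, -3:, -3:, -1]  # 3x3 corner patch of the last channel
expected_slice = fake_pipeline(seed=0)[0, -3:, -3:, -1]  # frozen reference values
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2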
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units with red blocks at least
    three units long, any two blocks separated by at least one black square
    (the Project Euler problem 114 recurrence)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
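A brute-force cross-check for the recurrence above, enumerating every black/red pattern of a short row directly (my own verification sketch, not part of the original solution; distinct runs of 1-bits are automatically separated by at least one 0-bit, so only the minimum run length needs checking):

def brute_force(length: int) -> int:
    """Count valid rows by enumerating every black(0)/red(1) pattern."""
    count = 0
    for mask in range(1 << length):
        red_runs = [run for run in format(mask, f"0{length}b").split("0") if run]
        if all(len(run) >= 3 for run in red_runs):
            count += 1
    return count


assert all(brute_force(n) == solution(n) for n in range(1, 15))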
'''simple docstring'''
import math
class Graph:
    """simple docstring"""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 28 | 0 |
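One detail worth noting about the driver above: `show_min` returns the distance rather than printing it, so the two calls at the bottom discard their results. A usage sketch that surfaces the answers (same `Graph` class as above; the expected values are hand-checked against the edge list):

graph = Graph(5)
for u, v, w in [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10),
                (3, 1, 2), (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]:
    graph.add_edge(u, v, w)
graph.floyd_warshall()
print(graph.show_min(1, 4))  # 11, via 1 -> 3 (5) -> 4 (6)
print(graph.show_min(0, 3))  # 16, via 0 -> 2 (9) -> 3 (7)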
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(F'''https://google.com{link.get("href")}''')
| 81 | def triangle_number_generator():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 81 | 1 |
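The divisor count in the row above uses the multiplicative formula: if n = p1^a1 * ... * pk^ak, then d(n) = (a1 + 1) * ... * (ak + 1). A quick sanity check against the worked example from the Project Euler 12 statement (28 = 2^2 * 7, so d(28) = 3 * 2 = 6):

assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) == 576  # the published answer; first to exceed 500 divisors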
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Return the least M such that the number of cuboids up to size M with
    an integer-length shortest surface path exceeds `limit`
    (Project Euler problem 86)."""
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
| 272 |
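The `sqrt(sum_shortest_sides**2 + max_cuboid_size**2)` test above comes from unfolding the cuboid: for an a x b x c cuboid with c the longest side, the shortest surface path has length sqrt((a + b)^2 + c^2). A worked instance from the problem statement, the 6 x 5 x 3 cuboid with shortest route 10 (my own check of the formula):

from math import sqrt

a, b, c = 3, 5, 6  # sorted so the two shortest sides are unfolded together
assert sqrt((a + b) ** 2 + c ** 2) == 10.0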
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 168 | 0 |
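A hedged usage sketch for the ONNX config in the row above. The checkpoint name matches the archive map in that row, but the processor call and the printed key set are my assumptions about the usual `transformers.onnx` workflow, not something this file fixes:

from transformers import AutoProcessor

config = LayoutLMv3Config()
onnx_config = LayoutLMv3OnnxConfig(config, task="sequence-classification")

processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)
print(sorted(dummy_inputs.keys()))  # expect attention_mask, bbox, input_ids, pixel_values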
def binary_recursive(decimal: int) -> str:
    """simple docstring"""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """simple docstring"""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod() | 368 |
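Since the module above runs `testmod()` but its placeholder docstrings carry no doctests, here are the checks I would expect it to exercise, written as plain asserts (hand-verified; `main` is the name I gave the wrapper function in that row):

assert binary_recursive(10) == "1010"
assert binary_recursive(17) == "10001"
assert main("11") == "0b1011"
assert main("-11") == "-0b1011"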
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 115 | 0 |
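The processor tests above lean on one pattern worth isolating: serialize a composite object to a temp directory, reload it, and compare component state rather than object identity. A minimal self-contained version of that round-trip check (a toy class of my own, not a real transformers processor):

import json
import os
import tempfile


class ToyProcessor:
    def __init__(self, vocab):
        self.vocab = vocab

    def save_pretrained(self, path):
        with open(os.path.join(path, "vocab.json"), "w", encoding="utf-8") as fp:
            json.dump(self.vocab, fp)

    @classmethod
    def from_pretrained(cls, path):
        with open(os.path.join(path, "vocab.json"), encoding="utf-8") as fp:
            return cls(json.load(fp))


tmpdir = tempfile.mkdtemp()
original = ToyProcessor({"low": 0, "er</w>": 1})
original.save_pretrained(tmpdir)
reloaded = ToyProcessor.from_pretrained(tmpdir)
assert reloaded.vocab == original.vocab  # compare state, not identity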
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["""OwlViTFeatureExtractor"""]
__lowerCamelCase : Optional[int] = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52 |
def solution(length: int = 50) -> int:
    """Count the single-colour tilings (tile lengths 2, 3 and 4) of a row of
    `length` black squares, one column per colour (the Project Euler
    problem 116 setup)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52 | 1 |
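As with problem 114 earlier, the tiling recurrence above is easy to cross-check by brute force for small rows; each colour is counted independently and the all-black row is excluded. A verification sketch of my own, using the problem statement's example (a row of 5 units admits 7 red, 3 green and 2 blue tilings, 12 in total):

def brute_force_single_colour(row: int, tile: int) -> int:
    """Count non-empty tilings of `row` units with black squares and `tile`-long blocks."""
    def fill(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        # the next unit is either a black square or the start of a coloured tile
        return fill(remaining - 1) + fill(remaining - tile)

    return fill(row) - 1  # drop the all-black row


assert brute_force_single_colour(5, 2) == 7
assert sum(brute_force_single_colour(5, t) for t in (2, 3, 4)) == solution(5)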
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})'
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})'
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' ) | 187 |
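The registry above is extensible: anything satisfying the `Formatter` interface can be registered under a new format type. A hypothetical sketch — the `UpperFormatter` class and the 'upper' type are my invention for illustration, not part of `datasets`:

class UpperFormatter(PythonFormatter):
    """Hypothetical formatter that upper-cases every string value in a row."""

    def format_row(self, pa_table):
        row = super().format_row(pa_table)
        return {k: v.upper() if isinstance(v, str) else v for k, v in row.items()}


_register_formatter(UpperFormatter, "upper", aliases=["uc"])
assert get_format_type_from_alias("uc") == "upper"
assert isinstance(get_formatter("upper"), UpperFormatter)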
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) | 187 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'''Save configuration file to {pytorch_config_dump_path}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 4 |
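Direct (non-CLI) use of the converter above; the paths are placeholders, and the empty config string falls back to a default `GPT2Config()` exactly as the function's first branch does:

convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="/path/to/tf_checkpoint/model.ckpt",
    gpt2_config_file="",  # empty string -> default GPT2Config()
    pytorch_dump_folder_path="/path/to/output",
)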
def solution(n: int = 1000000) -> int:
    """Return the starting number below `n` with the longest Collatz chain
    (Project Euler problem 14), memoising chain lengths in `counters`."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 278 | 0 |
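The dictionary `counters` is what keeps the row above fast: once a chain reaches any previously-seen value, the remaining length is looked up instead of recomputed. A worked micro-example of the lookup step (my own sketch mirroring the loop; the chain for 6 reuses the cached chain of 3):

def chain_length(n: int, cache: dict) -> int:
    """Collatz chain length with memoisation, counting both endpoints."""
    steps = 0
    while n not in cache:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps + cache[n]


cache = {1: 1}
for i in range(2, 10):
    cache[i] = chain_length(i, cache)
assert cache[6] == 9  # 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1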
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def __lowerCamelCase ( snake_case__ ,snake_case__="wiki40b" ,snake_case__="dense" ,snake_case__=10 ) -> Union[str, Any]:
"""simple docstring"""
if source == "none":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = query_qa_dense_index(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = query_es_index(
snake_case__ ,snake_case__ ,index_name="""english_wiki40b_snippets_100w""" ,n_results=snake_case__ ,)
_SCREAMING_SNAKE_CASE = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
_SCREAMING_SNAKE_CASE = """question: {} context: {}""".format(snake_case__ ,snake_case__ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # support_list is the module-level global set in the Streamlit flow below
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
else:
UpperCamelCase = sec_titles.split(''' & ''')
UpperCamelCase = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # the context-encoder loop appeared twice verbatim; the duplicate is dropped
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 125 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute per-class intersection and union between one prediction and its ground truth map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        # shift all labels down by one; the former background class (0) becomes the ignore value 255
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union areas over a batch of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels: int,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from the accumulated areas."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
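

# Editor's sketch (not part of the metric file): a minimal smoke test of
# intersect_and_union on a toy pair of 2x2 maps with 3 classes.
if __name__ == "__main__":
    toy_pred = np.array([[0, 1], [2, 1]])
    toy_gt = np.array([[0, 1], [1, 1]])
    area_intersect, area_union, area_pred, area_gt = intersect_and_union(
        toy_pred, toy_gt, num_labels=3, ignore_index=255
    )
    print(area_intersect)  # [1 2 0] - the class-0 pixel and two class-1 pixels agree
    print(area_union)      # [1 3 1]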
| 199 |
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # default region for the tests
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            # the TensorFlow scripts log Keras-style metric names
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
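

# Editor's sketch: how a test class typically consumes this class-scoped fixture
# (hypothetical test; the `framework` attribute feeds the fixture via request.cls).
@pytest.mark.usefixtures("sm_env")
class TestSingleNodePyTorch:
    framework = "pytorch"

    def test_metric_definitions(self):
        assert self.env.metric_definitions[0]["Name"] == "train_runtime"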
| 199 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yields the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its recorded factor at the next free multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: its square is the first composite to mark
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which 2 * p_n * n first exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
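

# Editor's sketch: a quick sanity check of the incremental sieve above
# (added for illustration; not in the original solution file).
from itertools import islice

assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]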
| 371 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps tokenized sequences and their lengths for the distillation training loop."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity check: one length per sequence, and each stored length is correct."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Split sequences longer than `max_model_input_size` into chunks, re-adding the special tokens."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences in which more than 50% of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process logs."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded tensors."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
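

# Editor's sketch: `batch_sequences` is designed to be a DataLoader collate_fn.
# Hypothetical wiring (`params` and `token_id_arrays` come from the training script):
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:  # (bs, max_seq_len), (bs,)
#         ...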
| 266 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point list to the largest batch with the pad value so they stack into one array."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale coordinates from `original_size` to the shape the image is resized to."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
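

# Editor's sketch (hypothetical usage; the checkpoint name is illustrative and
# `image` is a stand-in for a PIL.Image or numpy array):
#
#     from transformers import SamProcessor
#
#     processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#     inputs = processor(
#         images=image,
#         input_points=[[[450, 600]]],  # one (x, y) prompt point per image
#         return_tensors="pt",
#     )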
| 193 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 193 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
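

# Editor's sketch (commented out to avoid a circular import inside transformers):
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#
#     config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
#     model = ViTMSNModel(config)  # random weights; use from_pretrained() for trained ones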
| 368 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
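

# Editor's sketch of typical usage through the pipeline factory; the checkpoint
# name and image path below are placeholders.
#
#     from transformers import pipeline
#
#     generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
#     outputs = generator("image.png", points_per_batch=64, pred_iou_thresh=0.9)
#     print(len(outputs["masks"]), outputs["scores"][:3])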
| 53 | 0 |
def hexagonal_numbers(length: int) -> list:
    """Return the first `length` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
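    # Editor's check against the closed form h_n = n * (2n - 1):
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]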
| 73 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 8 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
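

# Editor's note: per the deprecation message above, downstream code should import
#
#     from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput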
| 286 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
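

# Editor's sketch: this reader backs `Dataset.from_generator` in recent `datasets`
# releases; a small example of the public entry point (commented out, since
# importing `datasets` here would be circular inside the library itself):
#
#     from datasets import Dataset
#
#     def gen():
#         for i in range(3):
#             yield {"id": i, "text": f"example {i}"}
#
#     ds = Dataset.from_generator(gen)
#     print(ds[0])  # {'id': 0, 'text': 'example 0'}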
| 335 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 335 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so gradients can flow through the image
    preprocessing (the stock processor returns plain arrays).
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate a VQGAN-CLIP model; pass a custom VQGAN via `vqgan` if desired."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the saved intermediate frames into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector to the latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = torch.randn_like(self.latent , requires_grad=UpperCamelCase_ , device=self.device )
__lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowerCamelCase = self._add_vector(UpperCamelCase_ )
__lowerCamelCase = loop_post_process(UpperCamelCase_ )
__lowerCamelCase = self._get_CLIP_loss(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
print("""CLIP loss""" , UpperCamelCase_ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        """Parse 'prompt:weight' strings (or (prompt, weight) pairs) into prompt/weight tensors."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image from the prompts, optionally starting from the image at `image_path`."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 29 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first search based topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
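    # Editor's check: with the hard-coded graph the DFS finishes vertices in the
    # order c, d, e, b, a, so the script prints:
    # ['c', 'd', 'e', 'b', 'a']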
| 29 | 1 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
def __call__( self : Union[str, Any] , lowercase_ : Union[str, List[str]] , lowercase_ : int = 100 , lowercase_ : float = 5.0 , lowercase_ : float = 1.0 , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ):
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = 1
elif isinstance(lowercase_ , lowercase_ ):
snake_case_ = len(lowercase_ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}" )
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ = self._encode_prompt(lowercase_ , lowercase_ , lowercase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_ )}." )
# get the initial completely masked latents unless the user supplied it
snake_case_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ = self.transformer.num_vector_embeds - 1
snake_case_ = torch.full(lowercase_ , lowercase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
                    F" {self.transformer.num_vector_embeds - 1} (inclusive)." )
snake_case_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
snake_case_ = self.scheduler.timesteps.to(self.device )
snake_case_ = latents
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ = self.transformer(lowercase_ , encoder_hidden_states=lowercase_ , timestep=lowercase_ ).sample
if do_classifier_free_guidance:
snake_case_ ,snake_case_ = model_output.chunk(2 )
snake_case_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowercase_ , dim=1 , keepdim=lowercase_ )
snake_case_ = self.truncate(lowercase_ , lowercase_ )
# remove `log(0)`'s (`-inf`s)
snake_case_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_ )
snake_case_ = self.vqvae.config.vq_embed_dim
snake_case_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ = self.vqvae.quantize.get_codebook_entry(lowercase_ , shape=lowercase_ )
snake_case_ = self.vqvae.decode(lowercase_ , force_not_quantize=lowercase_ ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
def A_ ( self : Optional[int] , lowercase_ : torch.FloatTensor , lowercase_ : float ):
snake_case_ ,snake_case_ = torch.sort(lowercase_ , 1 , descending=lowercase_ )
snake_case_ = torch.exp(lowercase_ )
snake_case_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ = torch.full_like(keep_mask[:, 0:1, :] , lowercase_ )
snake_case_ = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ = keep_mask[:, :-1, :]
snake_case_ = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ = log_p_x_0.clone()
snake_case_ = -torch.inf # -inf = log(0)
return rv
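For reference, a standalone restatement of the truncation step above with descriptive names; the (batch, num_classes, num_pixels) layout is inferred from the pipeline, and the function name is illustrative:

import torch


def truncate_log_probs(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    # Keep, per pixel, the smallest set of most-probable classes whose cumulative
    # probability stays below `truncation_rate`; mask everything else to log(0).
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # Always keep at least the single most probable class for each pixel.
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # Undo the sort so the mask lines up with the original class ordering.
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = float("-inf")  # -inf = log(0)
    return out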
| 56 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    """This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
    """library. You can have a look at this example script for pointers: """
    """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name in ("qnli", "rte", "wnli", "hans"):
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}""")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
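A quick, self-contained sanity check of the accuracy/F1 combination above, using made-up toy predictions whose scores can be verified by hand:

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()              # 0.75 (3 of 4 correct)
f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8 (precision 2/3, recall 1)
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})  # acc_and_f1 = 0.775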
| 166 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __A :
def __init__( self , a__ , a__=3 , a__=7 , a__=True , a__=True , a__=False , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Any = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=a__ , )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = FalconModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(a__ , attention_mask=a__ )
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Tuple = FalconModel(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : int = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , )
_lowerCAmelCase : Optional[int] = model(a__ , attention_mask=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Dict = FalconForCausalLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : List[Any] = FalconForCausalLM(config=a__ )
model.to(a__ )
model.eval()
# first forward pass
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , )
_lowerCAmelCase : Any = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_lowerCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase : str = torch.cat([input_mask, next_mask] , dim=-1 )
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )["""hidden_states"""][0]
_lowerCAmelCase : Optional[Any] = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )["""hidden_states"""][0]
# select random slice
_lowerCAmelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1e-3 ) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : int = (FalconForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Tuple = False
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
_lowerCAmelCase : Optional[int] = FalconModelTester(self )
_lowerCAmelCase : Dict = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Union[str, Any] = input_dict["""input_ids"""]
_lowerCAmelCase : Dict = input_ids.ne(1 ).to(a__ )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = 3
_lowerCAmelCase : Optional[int] = """single_label_classification"""
_lowerCAmelCase : List[Any] = input_dict["""input_ids"""]
_lowerCAmelCase : Dict = input_ids.ne(1 ).to(a__ )
_lowerCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[str] = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[str] = input_dict["""input_ids"""]
_lowerCAmelCase : List[Any] = FalconForCausalLM(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[str] = model(a__ , use_cache=a__ )
_lowerCAmelCase : Dict = input_ids.shape[0]
_lowerCAmelCase : Dict = model._convert_to_rw_cache(result.past_key_values )
_lowerCAmelCase : int = model._convert_cache_to_standard_format(a__ , a__ )
for layer in range(len(a__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : str = 3
_lowerCAmelCase : List[Any] = """multi_label_classification"""
_lowerCAmelCase : List[Any] = input_dict["""input_ids"""]
_lowerCAmelCase : Tuple = input_ids.ne(1 ).to(a__ )
_lowerCAmelCase : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCAmelCase : List[Any] = FalconForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __A ( self ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(a__ , """use_cache""" ):
return
_lowerCAmelCase : Dict = model_class(a__ ).to(a__ )
if "use_cache" not in inputs:
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : int = model(**a__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCAmelCase : int = (
getattr(a__ , """decoder_layers""" , a__ )
or getattr(a__ , """num_decoder_layers""" , a__ )
or config.num_hidden_layers
)
_lowerCAmelCase : Dict = getattr(a__ , """num_kv_heads""" , config.num_attention_heads )
_lowerCAmelCase : Optional[Any] = getattr(a__ , """d_model""" , config.hidden_size )
_lowerCAmelCase : List[Any] = embed_dim // num_attention_heads
_lowerCAmelCase : int = outputs["""past_key_values"""]
self.assertEqual(len(a__ ) , a__ )
_lowerCAmelCase , _lowerCAmelCase : Tuple = inputs["""input_ids"""].shape
for i in range(a__ ):
if config.new_decoder_architecture:
_lowerCAmelCase : Any = config.num_attention_heads
elif config.multi_query:
_lowerCAmelCase : Dict = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
_lowerCAmelCase : Dict = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(a__ )
_lowerCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(a__ )
_lowerCAmelCase : Dict = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
_lowerCAmelCase : List[Any] = model.generate(**a__ , do_sample=a__ , max_new_tokens=19 )
_lowerCAmelCase : int = tokenizer.batch_decode(a__ )[0]
self.assertEqual(a__ , a__ )
@slow
def __A ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(a__ )
_lowerCAmelCase : Any = FalconForCausalLM.from_pretrained(a__ )
model.eval()
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(a__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**a__ , do_sample=a__ , max_new_tokens=4 )
model.generate(**a__ , do_sample=a__ , max_new_tokens=4 )
model.generate(**a__ , num_beams=2 , max_new_tokens=4 )
@slow
def __A ( self ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(a__ )
_lowerCAmelCase : Union[str, Any] = FalconForCausalLM.from_pretrained(a__ )
model.eval()
model.to(device=a__ )
_lowerCAmelCase : int = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(a__ )
# Test results are the same with and without cache
_lowerCAmelCase : str = model.generate(**a__ , do_sample=a__ , max_new_tokens=20 , use_cache=a__ )
_lowerCAmelCase : str = model.generate(**a__ , do_sample=a__ , max_new_tokens=20 , use_cache=a__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
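The last test above asserts that greedy generation is identical with and without the KV cache. A compact, hedged standalone version of that check (the repo name is one of the tiny test checkpoints referenced above; any causal LM should behave the same way):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def check_cache_consistency(repo: str = "Rocketknight1/tiny-random-falcon-7b"):
    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = AutoModelForCausalLM.from_pretrained(repo).eval()
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    with torch.no_grad():
        # Greedy decoding must produce the same tokens either way; the cache
        # only changes how past keys/values are recomputed, not the math.
        with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
        without_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
    assert torch.equal(with_cache, without_cache)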
| 126 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
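The module above re-exports utilities and gates optional heavy backends behind `is_*_available()` checks. A minimal sketch of that guard pattern (the package name below is just an example):

import importlib.util


def is_available(package: str) -> bool:
    # A package counts as "available" when it can be found on sys.path,
    # without actually importing it yet.
    return importlib.util.find_spec(package) is not None


if is_available("deepspeed"):
    import deepspeed  # heavy optional backend, imported only when installed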
| 126 | 1 |
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    """Apply Malus's law: transmitted intensity = initial_intensity * cos^2(angle)."""
    if initial_intensity < 0:
        # handling of negative values of initial intensity
        raise ValueError('''The value of intensity cannot be negative''')
    if angle < 0 or angle > 360:
        # handling of values out of allowed range
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name='''malus_law''')
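A few spot checks for the function above: the 60° case is exact (cos 60° = 1/2, squared is 1/4), while the 90° case only reaches ~1e-33 instead of 0 because of floating-point rounding:

print(malus_law(100.0, 0))   # 100.0 — all light passes through
print(malus_law(100.0, 60))  # 25.000000000000004 — a quarter passes
print(malus_law(100.0, 90))  # ~3.75e-33, i.e. effectively zero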
| 348 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end timestamps and duration (in minutes) of a single job."""
    job_info = {}

    start = job['started_at']
    end = job['completed_at']

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Collect the timing of all jobs in a GitHub Actions workflow run, 100 per page."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F'''Bearer {token}'''}

    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F'''&page={i + 2}''', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})

        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 205 | 0 |
"""simple docstring"""
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")

    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{solution()}""")
| 76 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists in the handler"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
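A hedged usage sketch for the helpers above (the Menu class and key choices are illustrative, not part of the original module): decorate methods with the key codes they respond to, register the class, and `key_handler` then dispatches by code.

@register
class Menu:
    @mark(ord("j"))
    def move_down(self):
        return "down"

    @mark_multiple(ord("k"), ord("u"))
    def move_up(self):
        return "up"


print(Menu.key_handler[ord("j")](Menu))  # "down" — handlers are looked up by key code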
| 76 | 1 |