| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
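
# A minimal sketch (hypothetical tensors, not part of the original script) of what
# convert_state_dict does to checkpoint keys:
#   {"emb.weight": w, "blocks.0.ln0.weight": g, "blocks.2.att.key.weight": k}
# becomes
#   {"rwkv.embeddings.weight": w, "rwkv.blocks.0.pre_ln.weight": g,
#    "rwkv.blocks.2.attention.key.weight": k}
# while "head.weight" is the only key left without the "rwkv." prefix.
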
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file, then convert the state dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split into shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take as much space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
UpperCamelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
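
# Example invocation (hypothetical repo and checkpoint names, for illustration only):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id RWKV/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m-hf
# When --size is omitted, the size key ("169M") is inferred by scanning the
# checkpoint file name against NUM_HIDDEN_LAYERS_MAPPING.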
| code_codestyle: 186 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
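    # Temperature warping simply rescales logits before the softmax:
    # warped = logits / temperature. T < 1 sharpens the distribution (peaks grow,
    # valleys shrink), T > 1 flattens it, and a uniform distribution is a fixed
    # point, which is exactly what the assertions above verify.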
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
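
# A minimal NumPy sketch (not part of the test file) of the nucleus/top-p filtering
# that FlaxTopPLogitsWarper is exercised on above: sort tokens by probability, keep
# the smallest prefix whose cumulative mass reaches `top_p`, and mask the rest with
# `filter_value`. The names and signature here are illustrative, not the library API.
def _top_p_filter_sketch(logits, top_p, filter_value=-np.inf):
    shifted = np.exp(logits - logits.max())
    probs = shifted / shifted.sum()
    order = np.argsort(probs)[::-1]  # tokens from most to least likely
    cumulative = np.cumsum(probs[order])
    # keep every token up to and including the one that pushes the mass past top_p
    cutoff = np.searchsorted(cumulative, top_p) + 1
    filtered = np.full(logits.shape, filter_value, dtype=float)
    keep = order[:cutoff]
    filtered[keep] = logits[keep]
    return filtered

# e.g. _top_p_filter_sketch(np.log(np.array([0.3, 0.1, 0.1, 0.5])), top_p=0.8)
# keeps the 0.5 and 0.3 tokens and masks the other two, matching the expected
# filtered distribution in test_top_p_dist_warper.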
| style_context_codestyle: 186 | label: 1 |
"""simple docstring"""
import argparse
import copy
def _lowercase ( __snake_case ) -> List[Any]:
__lowerCAmelCase : Optional[Any] = {}
with open(__snake_case ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__lowerCAmelCase : Tuple = []
_list.append([line.split()[1], line.split()[2]] )
__lowerCAmelCase : Any = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__lowerCAmelCase : str = []
_list.append([line.split()[0], line.split()[2]] )
__lowerCAmelCase : Optional[int] = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
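
# Each neighbour produced above is a full tour obtained by swapping one pair of
# interior nodes, with the tour's total distance appended as its last element;
# sorting on that element lets tabu_search scan candidates cheapest-first.
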
def tabu_search(
    first_solution, distance_of_first_solution, dict_of_neighbours, iters, size
):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
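
# Illustrative run (hypothetical data file): each input line lists one edge as
# "<node> <node> <distance>" with single-character node names, e.g.
#   a b 20
#   a c 18
#   b c 10
# and the search is launched with:
#   python tabu_search.py -f tabu_test_data.txt -i 4 -s 3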
| code_codestyle: 58 |
"""simple docstring"""
import math
def _lowercase ( __snake_case ) -> bool:
__lowerCAmelCase : Optional[Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__snake_case )
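
# Why this works (added explanation): a partition 4**t = 2**t + k is "perfect"
# when t is an integer. Given k, x = 2**t solves x**2 - x = k, so
# x = (sqrt(4k + 1) + 1) / 2, and the partition is perfect exactly when
# log2(x) is an integer, which is the check performed above.
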
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| style_context_codestyle: 58 | label: 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| code_codestyle: 30 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
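
# Usage note (illustrative): constructing the deprecated class still works but warns.
#   extractor = DeiTFeatureExtractor()   # emits a FutureWarning
#   processor = DeiTImageProcessor()     # the preferred replacement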
| style_context_codestyle: 337 | label: 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, nicely wrapped at `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
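
# Illustrative example (hypothetical `args` object): given
#   args.base_cmd = "run_translation.py --output_dir old_dir --do_train"
# get_base_command(args, "output_benchmark") strips the old --output_dir and
# returns roughly:
#   [sys.executable, "run_translation.py", "--do_train",
#    "--output_dir", "output_benchmark", "--overwrite_output_dir"]
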
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len,
    target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| code_codestyle: 270 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274,
             0.30733347, 0.27100027, 0.27054125, 0.25536096]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611,
                     -0.73941123, 0.23397987, 0.47322682, -0.37823164]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261,
                     -0.7338317, 0.23990755, 0.47356272, -0.3786505]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
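
# A minimal usage sketch (assuming a CUDA device and the same public
# "stabilityai/stable-diffusion-2-base" weights the tests above rely on):
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", safety_checker=None
#   ).to("cuda")
#   image = pipe("a photo of the dolomites").images[0]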
| style_context_codestyle: 270 | label: 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__A : Optional[List[str]] = None
__A : int = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__A : Any = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
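# --- Illustrative sketch (added for clarity; `_demo_dtype_string` is a demo-only
# helper, not part of the original module) ---
# The encoder near the bottom of this file rebuilds dtype strings from three parts,
# byteorder + kind + itemsize, e.g. "<f8" for a little-endian 8-byte float:
def _demo_dtype_string(dtype):
    native = "<" if sys.byteorder == "little" else ">"
    byteorder = dtype.byteorder if dtype.byteorder != "=" else native
    return byteorder + dtype.kind + str(dtype.itemsize)

assert _demo_dtype_string(np.dtype("float64")) in ("<f8", ">f8")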
@dataclass
class A_ :
UpperCAmelCase__ = True
UpperCAmelCase__ = None
# Automatically constructed
UpperCAmelCase__ = "PIL.Image.Image"
UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCAmelCase__ = field(default='''Image''' , init=a_ , repr=a_ )
def __call__( self ):
'''simple docstring'''
return self.pa_type
def _lowercase ( self , _A ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_A , _A ):
UpperCAmelCase = np.array(_A )
if isinstance(_A , _A ):
return {"path": value, "bytes": None}
elif isinstance(_A , _A ):
return {"path": None, "bytes": value}
elif isinstance(_A , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_A )
elif isinstance(_A , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_A )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowercase ( self , _A , _A=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase = {}
UpperCAmelCase , UpperCAmelCase = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_A ):
UpperCAmelCase = PIL.Image.open(_A )
else:
UpperCAmelCase = path.split('''::''' )[-1]
try:
UpperCAmelCase = string_to_dict(_A , config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase = token_per_repo_id.get(_A )
except ValueError:
UpperCAmelCase = None
with xopen(_A , '''rb''' , use_auth_token=_A ) as f:
UpperCAmelCase = BytesIO(f.read() )
UpperCAmelCase = PIL.Image.open(bytes_ )
else:
UpperCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _lowercase ( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def _lowercase ( self , _A ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
UpperCAmelCase = pa.array([None] * len(_A ) , type=pa.binary() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase = pa.array([None] * len(_A ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase = storage.field('''bytes''' )
else:
UpperCAmelCase = pa.array([None] * len(_A ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase = storage.field('''path''' )
else:
UpperCAmelCase = pa.array([None] * len(_A ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase = pa.array(
[encode_np_array(np.array(_A ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCAmelCase = pa.array([None] * len(_A ) , type=pa.string() )
UpperCAmelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_A , self.pa_type )
def _lowercase ( self , _A ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(_A ):
with xopen(_A , '''rb''' ) as f:
UpperCAmelCase = f.read()
return bytes_
UpperCAmelCase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase = pa.array(
[os.path.basename(_A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_A , self.pa_type )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> bytes:
'''simple docstring'''
UpperCAmelCase = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase = image.format
else:
UpperCAmelCase = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(UpperCamelCase__ , format=UpperCamelCase__ )
return buffer.getvalue()
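# --- Illustrative sketch (added; `_demo_fallback_format` is a demo-only helper
# mirroring the mode-based fallback above): common 8-bit modes are serialized as
# PNG, everything else (e.g. the 32-bit integer mode "I") as TIFF. ---
def _demo_fallback_format(mode):
    return "PNG" if mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"

assert _demo_fallback_format("RGB") == "PNG"
assert _demo_fallback_format("I") == "TIFF"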
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> dict:
'''simple docstring'''
if hasattr(UpperCamelCase__ , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCamelCase__ )}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase = array.dtype
UpperCAmelCase = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase = dtype.kind
UpperCAmelCase = dtype.itemsize
UpperCAmelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase = dtype_byteorder + dtype_kind + str(UpperCamelCase__ )
UpperCAmelCase = np.dtype(UpperCamelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
UpperCAmelCase = PIL.Image.fromarray(array.astype(UpperCamelCase__ ) )
return {"path": None, "bytes": image_to_bytes(UpperCamelCase__ )}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase , UpperCAmelCase = first_non_null_value(UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCamelCase__ , np.ndarray ):
UpperCAmelCase = no_op_if_value_is_null(UpperCamelCase__ )
return [obj_to_image_dict_func(UpperCamelCase__ ) for obj in objs]
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
UpperCAmelCase = no_op_if_value_is_null(UpperCamelCase__ )
return [obj_to_image_dict_func(UpperCamelCase__ ) for obj in objs]
else:
return objs
else:
return objs
| 273
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : torch.FloatTensor
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@register_to_config
def __init__( self ,__UpperCAmelCase = 3 ,__UpperCAmelCase = 3 ,__UpperCAmelCase = ("DownEncoderBlock2D",) ,__UpperCAmelCase = ("UpDecoderBlock2D",) ,__UpperCAmelCase = (64,) ,__UpperCAmelCase = 1 ,__UpperCAmelCase = "silu" ,__UpperCAmelCase = 3 ,__UpperCAmelCase = 32 ,__UpperCAmelCase = 256 ,__UpperCAmelCase = 32 ,__UpperCAmelCase = None ,__UpperCAmelCase = 0.1_8_2_1_5 ,__UpperCAmelCase = "group" ,) -> List[str]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ : Any = Encoder(
in_channels=__UpperCAmelCase ,out_channels=__UpperCAmelCase ,down_block_types=__UpperCAmelCase ,block_out_channels=__UpperCAmelCase ,layers_per_block=__UpperCAmelCase ,act_fn=__UpperCAmelCase ,norm_num_groups=__UpperCAmelCase ,double_z=__UpperCAmelCase ,)
lowerCAmelCase__ : Any = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ : List[Any] = nn.Convad(__UpperCAmelCase ,__UpperCAmelCase ,1 )
lowerCAmelCase__ : Optional[Any] = VectorQuantizer(__UpperCAmelCase ,__UpperCAmelCase ,beta=0.2_5 ,remap=__UpperCAmelCase ,sane_index_shape=__UpperCAmelCase )
lowerCAmelCase__ : str = nn.Convad(__UpperCAmelCase ,__UpperCAmelCase ,1 )
# pass init params to Decoder
lowerCAmelCase__ : Optional[Any] = Decoder(
in_channels=__UpperCAmelCase ,out_channels=__UpperCAmelCase ,up_block_types=__UpperCAmelCase ,block_out_channels=__UpperCAmelCase ,layers_per_block=__UpperCAmelCase ,act_fn=__UpperCAmelCase ,norm_num_groups=__UpperCAmelCase ,norm_type=__UpperCAmelCase ,)
@apply_forward_hook
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = True ) -> VQEncoderOutput:
lowerCAmelCase__ : Any = self.encoder(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = self.quant_conv(__UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__UpperCAmelCase )
@apply_forward_hook
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ,__UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ : Dict = self.quantize(__UpperCAmelCase )
else:
lowerCAmelCase__ : str = h
lowerCAmelCase__ : Dict = self.post_quant_conv(__UpperCAmelCase )
lowerCAmelCase__ : str = self.decoder(__UpperCAmelCase ,quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ : int = sample
lowerCAmelCase__ : List[Any] = self.encode(__UpperCAmelCase ).latents
lowerCAmelCase__ : str = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
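# --- Illustrative sketch (added; demo-only names): the VectorQuantizer used above
# boils down to a nearest-neighbour lookup into a learned codebook. A minimal
# self-contained version, without the straight-through gradient trick: ---
def _demo_vector_quantize(z, codebook):
    # z: (N, D) latents, codebook: (K, D) embeddings
    distances = torch.cdist(z, codebook)  # (N, K) pairwise L2 distances
    indices = distances.argmin(dim=1)     # nearest codebook row per latent
    return codebook[indices], indices

_demo_q, _demo_idx = _demo_vector_quantize(torch.randn(4, 8), torch.randn(16, 8))
assert _demo_q.shape == (4, 8) and _demo_idx.shape == (4,)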
| 356
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function and can be altered: once the search
# range shrinks below this many elements, the functions fall back to a linear scan.
# It is recommended to keep this number greater than or equal to 10.
_lowerCAmelCase = 10
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for i in range(UpperCamelCase , UpperCamelCase ):
if array[i] == target:
return i
return -1
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Optional[int] = len(UpperCamelCase )
while left <= right:
if right - left < precision:
return lin_search(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = (left + right) // 3 + 1
lowerCAmelCase__ : List[str] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase__ : int = one_third - 1
elif array[two_third] < target:
lowerCAmelCase__ : Union[str, Any] = two_third + 1
else:
lowerCAmelCase__ : List[Any] = one_third + 1
lowerCAmelCase__ : List[str] = two_third - 1
else:
return -1
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Dict = (left + right) // 3 + 1
lowerCAmelCase__ : Tuple = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(UpperCamelCase , one_third - 1 , UpperCamelCase , UpperCamelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , UpperCamelCase , UpperCamelCase )
else:
return -1
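# --- Illustrative sketch (added; `_demo_ternary_search` is a demo-only compact
# variant using the same one-third / two-thirds probe points as the functions
# above, shown without the linear-scan fallback): ---
def _demo_ternary_search(array, target, left=0, right=None):
    if right is None:
        right = len(array) - 1
    while left <= right:
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1      # target lies in the left third
        elif array[two_third] < target:
            left = two_third + 1       # target lies in the right third
        else:
            left, right = one_third + 1, two_third - 1  # middle third
    return -1

assert _demo_ternary_search([1, 3, 5, 7, 9, 11, 13, 15, 17, 19], 7) == 3
assert _demo_ternary_search([1, 3, 5, 7, 9], 4) == -1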
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input('''Enter numbers separated by comma:\n''').strip()
_lowerCAmelCase = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
_lowerCAmelCase = int(input('''Enter the number to be found in the list:\n''').strip())
_lowerCAmelCase = ite_ternary_search(collection, target)
_lowerCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F"""Iterative search: {target} found at positions: {resulta}""")
print(F"""Recursive search: {target} found at positions: {resulta}""")
else:
print('''Not found''')
| 184
| 0
|
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: List[str] = str(id_ )
_A: Any = None
_A: Tuple = None
_A: Any = []
_A: Dict = {} # {vertex:distance}
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
return self.key < other.key
def __repr__( self : Tuple ):
"""simple docstring"""
return self.id
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
self.neighbors.append(lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Any = weight
def lowerCamelCase__ ( a , a , a , a ) -> Tuple:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __snake_case )
graph[b - 1].add_edge(graph[a - 1] , __snake_case )
def lowerCamelCase__ ( a , a ) -> list:
_A: int = []
for u in graph:
_A: Union[str, Any] = math.inf
_A: int = None
_A: Optional[Any] = 0
_A: Union[str, Any] = graph[:]
while q:
_A: str = min(__snake_case )
q.remove(__snake_case )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_A: int = u
_A: Union[str, Any] = u.edges[v.id]
for i in range(1 , len(__snake_case ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCamelCase__ ( a , a ) -> Iterator[tuple]:
for u in graph:
_A: Tuple = math.inf
_A: Union[str, Any] = None
_A: str = 0
_A: str = list(__snake_case )
hq.heapify(__snake_case )
while h:
_A: Any = hq.heappop(__snake_case )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_A: List[Any] = u
_A: Optional[int] = u.edges[v.id]
hq.heapify(__snake_case )
for i in range(1 , len(__snake_case ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCamelCase__ ( ) -> None:
pass
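# --- Illustrative sketch (added; demo-only names): the same lazy-deletion Prim idea
# on a plain adjacency dict, driving heapq directly instead of the vertex class above. ---
def _demo_prim(adj, root):
    visited = {root}
    frontier = [(w, root, v) for w, v in adj[root]]
    hq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        weight, u, v = hq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst.append((u, v, weight))
        for w, nxt in adj[v]:
            if nxt not in visited:
                hq.heappush(frontier, (w, v, nxt))
    return mst

_demo_graph = {
    "a": [(1, "b"), (4, "c")],
    "b": [(1, "a"), (2, "c")],
    "c": [(4, "a"), (2, "b")],
}
assert _demo_prim(_demo_graph, "a") == [("a", "b", 1), ("b", "c", 2)]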
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
__snake_case : Optional[int] = ['small', 'medium', 'large']
__snake_case : Optional[int] = 'lm_head.decoder.weight'
__snake_case : List[Any] = 'lm_head.weight'
def __lowerCamelCase ( __snake_case : str, __snake_case : str ) -> int:
"""simple docstring"""
A__ : str =torch.load(__snake_case )
A__ : List[Any] =d.pop(__snake_case )
os.makedirs(__snake_case, exist_ok=__snake_case )
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
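# --- Illustrative sketch (added; `_demo_state` is demo-only): the conversion above
# is just a key rename inside the checkpoint's state dict. ---
_demo_state = {"lm_head.decoder.weight": torch.zeros(2, 2), "transformer.wte.weight": torch.ones(2, 2)}
_demo_state["lm_head.weight"] = _demo_state.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in _demo_state and "lm_head.weight" in _demo_state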
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
__snake_case : Tuple = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
__snake_case : Dict = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
__snake_case : List[Any] = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 134
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] ) -> int:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> Any:
return max(metric_fn(_snake_case , _snake_case ) for gt in ground_truths )
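# --- Illustrative worked example (added; `_demo_em` is a simplified stand-in for
# exact_match_score): taking the max over ground truths means a prediction scores
# as correct if it matches ANY of the reference answers. ---
_demo_em = lambda pred, gt: float(pred.strip().lower() == gt.strip().lower())
assert max(_demo_em("Paris", gt) for gt in ["paris", "Paris, France"]) == 1.0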
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : Any ) -> int:
_snake_case = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
_snake_case = []
if args.gold_data_mode == "qa":
_snake_case = pd.read_csv(_snake_case , sep='''\t''' , header=_snake_case )
for answer_list in data[1]:
_snake_case = ast.literal_eval(_snake_case )
answers.append(_snake_case )
else:
_snake_case = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
_snake_case = [[reference] for reference in references]
_snake_case = 0
for prediction, ground_truths in zip(_snake_case , _snake_case ):
total += 1
em += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case )
fa += metric_max_over_ground_truths(_snake_case , _snake_case , _snake_case )
_snake_case = 100.0 * em / total
_snake_case = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ) -> int:
_snake_case = args.k
_snake_case = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
_snake_case = [line.strip() for line in open(_snake_case , '''r''' ).readlines()]
_snake_case = 0
for hypo, reference in zip(_snake_case , _snake_case ):
_snake_case = set(hypo.split('''\t''' )[:k] )
_snake_case = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_snake_case = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
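# --- Illustrative worked example (added): with k=2, hypothesis titles {"A", "B"}
# and reference titles {"B", "C"} share one title, so precision@2 is 1 / 2 = 0.5. ---
assert len({"A", "B"} & {"B", "C"}) / 2 == 0.5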
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
def strip_title(__lowerCamelCase : Optional[Any] ):
if title.startswith('''"''' ):
_snake_case = title[1:]
if title.endswith('''"''' ):
_snake_case = title[:-1]
return title
_snake_case = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case , )['''input_ids'''].to(args.device )
_snake_case = rag_model.rag.question_encoder(_snake_case )
_snake_case = question_enc_outputs[0]
_snake_case = rag_model.retriever(
_snake_case , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
_snake_case = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_snake_case = []
for docs in all_docs:
_snake_case = [strip_title(_snake_case ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(_snake_case ) )
return provenance_strings
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ) -> Tuple:
with torch.no_grad():
_snake_case = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_snake_case , return_tensors='''pt''' , padding=_snake_case , truncation=_snake_case )
_snake_case = inputs_dict.input_ids.to(args.device )
_snake_case = inputs_dict.attention_mask.to(args.device )
_snake_case = rag_model.generate( # rag_model overwrites generate
_snake_case , attention_mask=_snake_case , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_snake_case , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_snake_case = rag_model.retriever.generator_tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
if args.print_predictions:
for q, a in zip(_snake_case , _snake_case ):
logger.info('''Q: {} - A: {}'''.format(_snake_case , _snake_case ) )
return answers
def _UpperCAmelCase ( ) -> int:
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_snake_case , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=_snake_case , choices=['''exact''', '''compressed''', '''legacy'''] , type=_snake_case , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=_snake_case , type=_snake_case , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=_snake_case , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_snake_case , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=_snake_case , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=_snake_case , type=_snake_case , required=_snake_case , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=_snake_case , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file. '''
'''qa - a single line in the following format: question [tab] answer_list. '''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=_snake_case , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=_snake_case , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=_snake_case , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=_snake_case , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=50 , type=_snake_case , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retrieved while generating.''' , )
_snake_case = parser.parse_args()
_snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> Union[str, Any]:
_snake_case = {}
if args.model_type is None:
_snake_case = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
_snake_case = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
_snake_case = args.n_docs
if args.index_name is not None:
_snake_case = args.index_name
if args.index_path is not None:
_snake_case = args.index_path
else:
_snake_case = BartForConditionalGeneration
_snake_case = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , _snake_case )
_snake_case = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
_snake_case = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(_snake_case ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
_snake_case = RagRetriever.from_pretrained(_snake_case , **_snake_case )
_snake_case = model_class.from_pretrained(_snake_case , retriever=_snake_case , **_snake_case )
model.retriever.init_retrieval()
else:
_snake_case = model_class.from_pretrained(_snake_case , **_snake_case )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
_snake_case = []
for line in tqdm(_snake_case ):
questions.append(line.strip() )
if len(_snake_case ) == args.eval_batch_size:
_snake_case = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) + '''\n''' )
preds_file.flush()
_snake_case = []
if len(_snake_case ) > 0:
_snake_case = evaluate_batch_fn(_snake_case , _snake_case , _snake_case )
preds_file.write('''\n'''.join(_snake_case ) )
preds_file.flush()
score_fn(_snake_case , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase__ = get_args()
main(args)
| 352
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=13 , _lowerCamelCase : int=32 , _lowerCamelCase : List[str]=3 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[int]=[10, 20, 30, 40] , _lowerCamelCase : Dict=[2, 2, 3, 2] , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Tuple=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Any=0.0_2 , _lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , _lowerCamelCase : Any=[2, 3, 4] , _lowerCamelCase : Any=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = num_channels
_snake_case = num_stages
_snake_case = hidden_sizes
_snake_case = depths
_snake_case = is_training
_snake_case = use_labels
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = out_features
_snake_case = out_indices
_snake_case = scope
def lowercase ( self : Dict ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels
def lowercase ( self : str ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowercase ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[str] ):
_snake_case = ConvNextVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase ( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] ):
_snake_case = ConvNextVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ):
_snake_case = ConvNextVaBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case = None
_snake_case = ConvNextVaBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(_lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase ( self : str ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
def lowercase ( self : int ):
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__a = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : str ):
_snake_case = ConvNextVaModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 )
def lowercase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Dict ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowercase ( self : Dict ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowercase ( self : int ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowercase ( self : int ):
pass
def lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case = True
if model_class.__name__ in [
*get_values(_lowerCamelCase ),
*get_values(_lowerCamelCase ),
]:
continue
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
_snake_case = model(**_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Dict ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case = False
_snake_case = True
if (
model_class.__name__
in [*get_values(_lowerCamelCase ), *get_values(_lowerCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
_snake_case = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
_snake_case = model(**_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Optional[Any] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowercase ( self : Optional[Any] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowercase ( self : Optional[int] ):
def check_hidden_states_output(_lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] ):
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
_snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowercase ( self : List[str] ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def lowercase ( self : str ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = ConvNextVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _UpperCAmelCase ( ) -> Optional[Any]:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : List[Any] ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def lowercase ( self : Optional[Any] ):
_snake_case = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = preprocessor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
_snake_case = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
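# --- Illustrative worked example (added): why the shape checks above divide by 32.
# ConvNeXt-style backbones use a stride-4 patchify stem followed by three stride-2
# downsampling stages, an overall spatial reduction of 4 * 2**3 == 32, so the 32x32
# test images end up as 1x1 feature maps. ---
_demo_size = 32
for _demo_stride in [4, 2, 2, 2]:
    _demo_size //= _demo_stride
assert _demo_size == 1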
| 40
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ = random.Random()
if is_torch_available():
import torch
def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : Dict=1.0 , __lowerCamelCase : int=None , __lowerCamelCase : List[str]=None ) ->Tuple:
if rng is None:
_SCREAMING_SNAKE_CASE = global_rng
_SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A , A=7 , A=400 , A=2000 , A=1 , A=0.0 , A=1_6000 , A=True , A=True , ) -> Tuple:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = min_seq_length
_SCREAMING_SNAKE_CASE = max_seq_length
_SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_SCREAMING_SNAKE_CASE = feature_size
_SCREAMING_SNAKE_CASE = padding_value
_SCREAMING_SNAKE_CASE = sampling_rate
_SCREAMING_SNAKE_CASE = return_attention_mask
_SCREAMING_SNAKE_CASE = do_normalize
def snake_case_( self ) -> Dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_( self , A=False , A=False ) -> str:
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
_SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_SCREAMING_SNAKE_CASE = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_SCREAMING_SNAKE_CASE = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = ASTFeatureExtractor
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self )
def snake_case_( self ) -> Dict:
# Tests that all calls wrap to encode_plus and batch_encode_plus
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_SCREAMING_SNAKE_CASE = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
_SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
_SCREAMING_SNAKE_CASE = feat_extract(A , padding=A , return_tensors="""np""" ).input_values
_SCREAMING_SNAKE_CASE = feat_extract(A , padding=A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_SCREAMING_SNAKE_CASE = np.asarray(A )
_SCREAMING_SNAKE_CASE = feat_extract(A , return_tensors="""np""" ).input_values
_SCREAMING_SNAKE_CASE = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
@require_torch
def snake_case_( self ) -> List[Any]:
import torch
_SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
_SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def snake_case_( self , A ) -> Optional[Any]:
from datasets import load_dataset
_SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def snake_case_( self ) -> Optional[int]:
# fmt: off
_SCREAMING_SNAKE_CASE = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
_SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
_SCREAMING_SNAKE_CASE = ASTFeatureExtractor()
_SCREAMING_SNAKE_CASE = feature_extractor(A , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A , atol=1e-4 ) )
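# --- Illustrative worked example (added): how the tester above spaces its input
# lengths. With min 400, max 2000 and batch size 7, seq_length_diff is
# (2000 - 400) // (7 - 1) == 266, giving lengths 400, 666, ..., 1996. ---
_demo_lengths = list(range(400, 2000, (2000 - 400) // (7 - 1)))
assert _demo_lengths == [400, 666, 932, 1198, 1464, 1730, 1996]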
| 58
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase_ = HUGGINGFACE_HUB_CACHE
lowercase_ = """config.json"""
lowercase_ = """diffusion_pytorch_model.bin"""
lowercase_ = """diffusion_flax_model.msgpack"""
lowercase_ = """model.onnx"""
lowercase_ = """diffusion_pytorch_model.safetensors"""
lowercase_ = """weights.pb"""
lowercase_ = """https://huggingface.co"""
lowercase_ = default_cache_path
lowercase_ = """diffusers_modules"""
lowercase_ = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowercase_ = ["""fp16""", """non-ema"""]
lowercase_ = """.self_attn"""
| 58
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[List[PIL.Image.Image], np.ndarray]
__snake_case : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax Stable Diffusion pipelines."""
        images: np.ndarray
        nsfw_content_detected: List[bool]
    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
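# A minimal sketch (illustrative, not diffusers' actual dummy machinery) of what
# the dummy fallbacks imported above do: the name imports fine, but it raises a
# clear error the moment it is instantiated without the optional backend.
#   class StableDiffusionPipeline:  # dummy stand-in
#       def __init__(self, *args, **kwargs):
#           raise ImportError("StableDiffusionPipeline requires `torch` and `transformers` to be installed.")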
| 356
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return the indices of the two numbers
    that add up to ``target`` using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 100)
    []
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
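# The two-pointer scan above requires `nums` to be sorted. A minimal sketch of
# the usual hash-map variant for unsorted input (O(n) time, O(n) extra space);
# the helper name `two_sum_hashmap` is illustrative and not from the original.
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    """
    >>> two_sum_hashmap([3, 1, 4], 5)
    [1, 2]
    """
    seen: dict[int, int] = {}  # value -> index of its first occurrence
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []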
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 193
| 0
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass
    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 270
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
    def process(self):
        # For every destination pixel, copy the nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
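# A vectorized sketch of the same nearest-neighbour mapping using numpy fancy
# indexing instead of per-pixel Python loops; `resize_nearest` is an
# illustrative helper name, not part of the original class.
def resize_nearest(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per destination row
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source col per destination col
    return img[ys[:, None], xs[None, :]]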
| 270
| 1
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the selected teacher layers into the (smaller) student module list."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Look up which teacher layers to copy for this (student, teacher) depth pair."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
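# Illustrative lookups (not in the original) showing how the table above is used:
#   pick_layers_to_copy(n_student=3, n_teacher=12)  -> [0, 6, 11]
#   pick_layers_to_copy(n_student=2, n_teacher=16)  -> [0, 15]
#   pick_layers_to_copy(n_student=5, n_teacher=12)  -> warns, then returns [0, 1, 2, 3, 4]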
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Which teacher layers should supervise each student layer during distillation."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student model shaped like the teacher, initialized by copying alternating teacher layers."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
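# A minimal usage sketch (not part of the original file); the teacher checkpoint
# and save path below are illustrative and require downloading the model:
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_bart_6_3", e=6, d=3
#   )
# or, via the fire CLI entry point above:
#   python make_student.py facebook/bart-large-cnn student_bart_6_3 --e 6 --d 3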
| 361
|
def catalan(number: int) -> int:
    """
    Return the ``number``-th Catalan number (1-indexed).

    >>> catalan(1)
    1
    >>> catalan(3)
    2
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
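# Cross-check sketch (not in the original): the closed form
# C(n) = binomial(2n, n) // (n + 1), with the same 1-indexed convention.
import math
def catalan_closed_form(number: int) -> int:
    n = number - 1
    return math.comb(2 * n, n) // (n + 1)
assert all(catalan(k) == catalan_closed_form(k) for k in range(1, 10))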
| 267
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
| 147
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
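# A minimal sketch (not part of this module) of how the re-exported
# find_executable_batch_size is typically used: the decorated function is
# retried with a smaller batch size whenever it raises an out-of-memory error.
#   from accelerate.utils import find_executable_batch_size
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the training loop
#   train()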
| 184
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Pass a custom GradScaler configuration through the kwargs handler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
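# A minimal sketch (not accelerate's actual implementation) of the idea behind
# KwargsHandler.to_kwargs(): diff an instance against a default-constructed one
# and keep only the fields whose values changed.
#   from dataclasses import asdict
#   def to_kwargs_sketch(handler):
#       defaults = asdict(handler.__class__())
#       return {k: v for k, v in asdict(handler).items() if defaults[k] != v}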
| 138
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 138
| 1
|
"""Simple tabu-search solver for a TSP-style weighted-edge input file."""
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict: node -> list of [neighbour, distance]."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
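# Illustrative input format (inferred from the parsing above, not from the
# original file): each line is "node_a node_b distance", so a file containing
#   a b 20
#   a c 18
#   b c 10
# yields {'a': [['b', '20'], ['c', '18']], 'b': [['a', '20'], ['c', '10']],
#         'c': [['a', '18'], ['b', '10']]}.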
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively move to the best non-tabu neighbour, keeping the best tour seen so far."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 23
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
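# A minimal sketch (not transformers' actual _LazyModule) of the lazy-import
# idea used above: a module-level __getattr__ resolves names on first access.
#   import importlib
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)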
| 40
| 0
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 152
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the selected teacher layers into the (smaller) student module list."""
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Look up which teacher layers to copy for this (student, teacher) depth pair."""
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Which teacher layers should supervise each student layer during distillation."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Make a student model by copying alternating layers from a seq2seq teacher."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
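# Usage sketch (checkpoint name and file name are illustrative; via the fire
# CLI below, command-line arguments map directly onto the function parameters):
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3
# which copies all 12 encoder layers and decoder layers [0, 6, 11].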
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 152
| 1
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
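# Round-trip sketch (illustrative, not part of the original module): a torch
# batch in [-1, 1] with layout (N, C, H, W) becomes a list of PIL images.
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1
#   pils = pt_to_pil(batch)  # list of two 64x64 PIL.Image.Image objects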
| 309
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
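# Usage sketch (downloads from the Hugging Face Hub on first use; ids shown are
# for bert-base-uncased):
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tokenizer("hello world")
#   enc["input_ids"]  # [101, 7592, 2088, 102] -> [CLS] hello world [SEP]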
| 193
| 0
|
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
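# Launch sketch (standard `accelerate` CLI; the file name is illustrative):
#   accelerate config                                  # one-time machine setup
#   accelerate launch nlp_example.py --mixed_precision fp16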
| 61
|
'''simple docstring'''
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string into camelCase (or PascalCase if `use_pascal` is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 61
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = UnCLIPImageVariationPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__SCREAMING_SNAKE_CASE : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__SCREAMING_SNAKE_CASE : Optional[int] = False
@property
def _a (self ):
return 32
@property
def _a (self ):
return 32
@property
def _a (self ):
return self.time_input_dim
@property
def _a (self ):
return self.time_input_dim * 4
@property
def _a (self ):
return 100
@property
def _a (self ):
A_ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__SCREAMING_SNAKE_CASE )
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
A_ : Any = UnCLIPTextProjModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def _a (self ):
torch.manual_seed(0 )
A_ : str = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
        A_ : Tuple = UNet2DConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def _a (self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _a (self ):
torch.manual_seed(0 )
        A_ : int = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def _a (self ):
torch.manual_seed(1 )
        A_ : Tuple = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def _a (self ):
A_ : List[Any] = self.dummy_decoder
A_ : Optional[Any] = self.dummy_text_proj
A_ : int = self.dummy_text_encoder
A_ : List[str] = self.dummy_tokenizer
A_ : int = self.dummy_super_res_first
A_ : List[str] = self.dummy_super_res_last
A_ : int = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A_ : Dict = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A_ : Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
A_ : List[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _a (self , lowercase , lowercase=0 , lowercase=True ):
A_ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
A_ : List[Any] = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
A_ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
if pil_image:
A_ : List[str] = input_image * 0.5 + 0.5
A_ : Optional[Any] = input_image.clamp(0 , 1 )
A_ : str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A_ : Optional[Any] = DiffusionPipeline.numpy_to_pil(__SCREAMING_SNAKE_CASE )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _a (self ):
A_ : str = """cpu"""
A_ : Any = self.get_dummy_components()
A_ : Union[str, Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = pipe(**__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = output.images
A_ : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : Dict = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
A_ : int = image[0, -3:, -3:, -1]
A_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : int = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
A_ : str = """cpu"""
A_ : Optional[Any] = self.get_dummy_components()
A_ : str = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
A_ : Dict = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : Any = pipe(**__SCREAMING_SNAKE_CASE )
A_ : List[str] = output.images
A_ : List[str] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : Dict = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Union[str, Any] = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
A_ : List[Any] = """cpu"""
A_ : Optional[Any] = self.get_dummy_components()
A_ : List[str] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
A_ : int = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : int = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
A_ : List[str] = pipe(**__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = output.images
A_ : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : int = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
A_ : List[Any] = pipe(
**__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
A_ : str = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
A_ : Dict = torch.device("""cpu""" )
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : Optional[Any] = 1
A_ : Tuple = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
A_ : Tuple = pipe.decoder.dtype
A_ : Any = 1
A_ : List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
A_ : List[Any] = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
A_ : Tuple = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
A_ : str = pipe.prepare_latents(
__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
A_ : Optional[Any] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
A_ : Tuple = pipe(
**__SCREAMING_SNAKE_CASE , decoder_latents=__SCREAMING_SNAKE_CASE , super_res_latents=__SCREAMING_SNAKE_CASE ).images
A_ : Optional[int] = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE , pil_image=__SCREAMING_SNAKE_CASE )
# Don't pass image, instead pass embedding
A_ : List[str] = pipeline_inputs.pop("""image""" )
A_ : Tuple = pipe.image_encoder(__SCREAMING_SNAKE_CASE ).image_embeds
A_ : Optional[Any] = pipe(
**__SCREAMING_SNAKE_CASE , decoder_latents=__SCREAMING_SNAKE_CASE , super_res_latents=__SCREAMING_SNAKE_CASE , image_embeddings=__SCREAMING_SNAKE_CASE , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def _a (self ):
A_ : Tuple = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
A_ : str = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=__SCREAMING_SNAKE_CASE )
@skip_mps
def _a (self ):
A_ : Union[str, Any] = torch_device == """cpu"""
A_ : List[str] = True
A_ : Optional[Any] = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=__SCREAMING_SNAKE_CASE , relax_max_difference=__SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE , )
def _a (self ):
A_ : str = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
A_ : List[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__SCREAMING_SNAKE_CASE )
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_local()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
A_ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
A_ : int = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
A_ : Any = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
A_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : Any = pipeline(
__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
A_ : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 15 )
| 206
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate, preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant multipliers parsed from rules like "1:10,0.1:20,0.01:30,0.005"
    (multiplier:step boundary pairs, with a bare final multiplier for all later steps).
    The temporary variable names below are a reconstruction of the obfuscated originals."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedulers above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
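# Usage sketch (optimizer, model and step counts are illustrative):
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   # inside the training loop: optimizer.step(); lr_scheduler.step()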
| 267
| 0
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
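# Invocation sketch (all paths are placeholders; assumes this file is saved as
# convert_mobilebert_original_tf_checkpoint_to_pytorch.py):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_mobilebert.bin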
| 343
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
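# Run sketch (assumes diffusers' RUN_NIGHTLY switch for @nightly tests, a CUDA
# GPU, and onnxruntime-gpu; the test file path is illustrative):
#   RUN_NIGHTLY=1 python -m pytest tests/pipelines/test_onnx_stable_diffusion_inpaint_legacy.py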
| 343
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A : Tuple = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    'Should be one of a python, numpy, pytorch or tensorflow object.'
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.'
            )

        return padding_strategy
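# Padding sketch (illustrative; assumes a subclass whose main input name is
# "input_values", e.g. a speech feature extractor): ragged float features are
# padded to the longest example and stacked.
#   feats = BatchFeature({"input_values": [[0.1, 0.2, 0.3], [0.4]]})
#   batch = extractor.pad(feats, padding="longest", return_tensors="np")
#   batch["input_values"].shape   # (2, 3); the short row is padded with padding_value
#   batch["attention_mask"][1]    # [1, 0, 0]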
| 138
|
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; the positive root of f is the square root of `a`."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring."""
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9_999, tolerance: float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1
) -> float:
    """Approximate the square root of `a` with the Newton-Raphson method."""
    if a < 0:
        raise ValueError('math domain error')

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
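# Examples (illustrative): the initial point overshoots, then the Newton
# iterations pull it back to the root.
#   get_initial_point(4.0)      # 16.0  (2.0 -> 4.0 -> 16.0)
#   square_root_iterative(4.0)  # ~2.0
#   square_root_iterative(3.2)  # ~1.7888543819998317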
if __name__ == "__main__":
from doctest import testmod
testmod()
| 138
| 1
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc` and the x axis on [x_start, x_end] with trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
lowercase : Union[str, Any] = 1_0
while i <= 1_0_0_0_0_0:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 1_0
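# Note: because each trapezoid uses abs(fx2 + fx1), the loop above estimates the
# unsigned area between the curve and the x axis, which for f(x) = x^3 + x^2 on
# [-5, 5] is roughly 312.7 (the signed integral would be 250/3 = 83.33...).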
| 225
|
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of the graph `l` (an adjacency list)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
lowercase : List[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
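# For the adjacency list above, compute_ap prints the articulation points 2, 3
# and 5: removing 2 cuts the triangle {0, 1} off from the rest, removing 3
# isolates vertex 4, and removing 5 disconnects the cycle {6, 7, 8}.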
| 225
| 1
|
'''simple docstring'''
import socket
def main():
    SCREAMING_SNAKE_CASE__ = None  # placeholder removed; real names restored below
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 1_2_3_1_2

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1_0_2_4)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
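# A minimal matching server sketch (illustrative; binds the same port 12312 and
# streams some file back to the client above):
#   import socket
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _addr = server.accept()
#   print(conn.recv(1024))  # b'Hello server!'
#   with open("File_to_send", "rb") as in_file:
#       while chunk := in_file.read(1024):
#           conn.send(chunk)
#   conn.close()
#   server.close()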
| 152
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = """ibert"""

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@property
def __magic_name__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : str ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ : Any ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
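# Hedged usage sketch (added): IBertModel ships alongside this config in transformers,
# and "kssteven/ibert-roberta-base" is the checkpoint referenced in the archive map above.
if __name__ == "__main__":
    from transformers import IBertConfig, IBertModel

    config = IBertConfig(quant_mode=True)  # enable the integer-only quantization mode this config gates
    model = IBertModel(config)
    # or load the published checkpoint:
    model = IBertModel.from_pretrained("kssteven/ibert-roberta-base")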
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
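# Note (added): the tests above only run on a multi-GPU machine. Launching this file
# directly with `torchrun --nproc_per_node=2 <path/to/this/file>` runs the __main__ block
# on two ranks, exercising pad_across_processes with default end-padding and pad_first=True.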
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    """Unconditional audio generation pipeline built from a 1D UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=100,
        generator=None,
        audio_length_in_s=None,
        return_dict=True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
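# Hedged usage sketch (added; "harmonai/maestro-150k" is a public Dance Diffusion
# checkpoint used in the diffusers docs -- swap in any compatible 1D-UNet checkpoint):
if __name__ == "__main__":
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    output = pipe(num_inference_steps=100, audio_length_in_s=4.0)
    waveform = output.audios[0]  # numpy array of shape (channels, samples)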
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
if attention_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.where(input_ids != config.pad_token_id, 1, 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
if head_mask is None:
UpperCAmelCase_ : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Union[str, Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
def pancake_sort(arr):
    """Sort a list using pancake sort and return it."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the prefix of length cur so the maximum lands at position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
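# Quick self-checks (added; not in the source):
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]
assert pancake_sort([]) == []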
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
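# Hedged migration sketch (added; "MCG-NJU/videomae-base" is a public VideoMAE checkpoint).
# Code that still constructs the deprecated class should move to the processor directly:
if __name__ == "__main__":
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")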
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
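# Example invocation (added; the script filename is an assumption, but the three flags
# are exactly the ones declared by the argparse parser above):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/ckpt \
#       --mobilebert_config_file /path/to/config.json \
#       --pytorch_dump_path ./pytorch_model.bin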
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector with Gaussian elimination and back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-order polynomial passing through (1, y_1), (2, y_2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials of degree 1..order."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
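# For reference (added): the published answer to Project Euler problem 101 is 37076114526;
# this figure comes from the problem's public solution set, not from running this file.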
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment (EMI) on a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
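# Worked example (added; values follow from the formula above, computed by hand):
# equated_monthly_installments(25_000, 0.12, 3) means 36 monthly payments at 1% per month,
# i.e. 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1), which comes out to roughly 830.36.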
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ''
else:
SCREAMING_SNAKE_CASE_ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
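# Illustration (not part of the conversion itself): the fused qkv projection has
# shape (3 * hidden_size, hidden_size), and the three slices taken above are the
# query, key and value blocks, in that order. A minimal sketch with toy sizes:
#
#     import torch
#     hidden_size = 4
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q = in_proj_weight[:hidden_size, :]
#     k = in_proj_weight[hidden_size : 2 * hidden_size, :]
#     v = in_proj_weight[-hidden_size:, :]
#     assert torch.equal(torch.cat([q, k, v]), in_proj_weight)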
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
# the projection head is only used during self-supervised pre-training in MSN;
# it is not needed for downstream tasks.
SCREAMING_SNAKE_CASE_ = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = val
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = ViTMSNConfig()
SCREAMING_SNAKE_CASE_ = 10_00
SCREAMING_SNAKE_CASE_ = 'datasets/huggingface/label-files'
SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 3_84
SCREAMING_SNAKE_CASE_ = 15_36
SCREAMING_SNAKE_CASE_ = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = ViTMSNModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='cpu' )['target_encoder']
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , base_model=__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
SCREAMING_SNAKE_CASE_ = image_processor(images=__UpperCAmelCase , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 225
| 1
|
def lowercase__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
if number > 0:
raise ValueError('input must be a negative integer' )
lowercase : str = len(bin(_lowerCAmelCase )[3:] )
lowercase : Union[str, Any] = bin(abs(_lowerCAmelCase ) - (1 << binary_number_length) )[3:]
lowercase : Dict = (
(
"""1"""
+ """0""" * (binary_number_length - len(_lowerCAmelCase ))
+ twos_complement_number
)
if number < 0
else """0"""
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'dandelin/vilt-b32-finetuned-vqa'
_lowerCamelCase = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
_lowerCamelCase = 'image_qa'
_lowerCamelCase = AutoProcessor
_lowerCamelCase = AutoModelForVisualQuestionAnswering
_lowerCamelCase = ['image', 'text']
_lowerCamelCase = ['text']
def __init__( self : List[str], *lowerCAmelCase : Optional[Any], **lowerCAmelCase : Optional[Any] ) -> str:
requires_backends(self, ['vision'] )
super().__init__(*lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : "Image", lowerCAmelCase : str ) -> Dict:
return self.pre_processor(lowerCAmelCase, lowerCAmelCase, return_tensors='pt' )
def lowercase ( self : List[Any], lowerCAmelCase : int ) -> Tuple:
with torch.no_grad():
return self.model(**lowerCAmelCase ).logits
def lowercase ( self : List[str], lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
lowercase : Optional[int] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 53
| 0
|
"""simple docstring"""
import numpy as np
from PIL import Image
def lowercase_ ( _lowerCamelCase: np.ndarray , _lowerCamelCase: int , _lowerCamelCase: int ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase : Dict = np.array(_lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : Union[str, Any] = 0
# compute the shape of the output matrix
__lowerCamelCase : Any = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__lowerCamelCase : Dict = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the current pooling window
__lowerCamelCase : int = np.max(arr[i : i + size, j : j + size] )
# shift the pooling window right by `stride` columns
j += stride
mat_j += 1
# shift the pooling window down by `stride` rows
i += stride
mat_i += 1
# reset the column indices to 0
__lowerCamelCase : Any = 0
__lowerCamelCase : Optional[int] = 0
return updated_arr
def lowercase_ ( _lowerCamelCase: np.ndarray , _lowerCamelCase: int , _lowerCamelCase: int ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = np.array(_lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : Tuple = 0
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : str = 0
# compute the shape of the output matrix
__lowerCamelCase : List[Any] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__lowerCamelCase : List[str] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the current pooling window
__lowerCamelCase : Optional[Any] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling window right by `stride` columns
j += stride
mat_j += 1
# shift the pooling window down by `stride` rows
i += stride
mat_i += 1
# reset the column indices to 0
__lowerCamelCase : List[Any] = 0
__lowerCamelCase : Tuple = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
__A = Image.open('''path_to_image''')
# Convert the image to a numpy array, apply max pooling and display the result.
# The image must be square for the functions above.
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Convert the image to a numpy array, apply average pooling and display the result.
# The image must be square for the functions above.
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
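# A minimal, self-contained sketch of the sliding-window logic above on a 4x4
# input with size=2 and stride=2 (all names here are illustrative only):
#
#     import numpy as np
#     arr = np.arange(16).reshape(4, 4)
#     out_dim = (arr.shape[0] - 2) // 2 + 1  # -> 2, as in the shape formula above
#     pooled = np.zeros((out_dim, out_dim))
#     for mi, i in enumerate(range(0, arr.shape[0] - 1, 2)):
#         for mj, j in enumerate(range(0, arr.shape[1] - 1, 2)):
#             pooled[mi, mj] = arr[i : i + 2, j : j + 2].max()
#     # pooled == [[ 5.,  7.], [13., 15.]]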
| 135
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A = '''pt'''
elif is_tf_available():
__A = '''tf'''
else:
__A = '''jax'''
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = PerceiverTokenizer
snake_case__ = False
def lowerCamelCase__ ( self : List[str] ):
super().setUp()
__lowerCamelCase : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase__ ( self : Dict ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def lowerCamelCase__ ( self : List[Any] , **UpperCAmelCase : str ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=False , UpperCAmelCase : str=20 , UpperCAmelCase : Union[str, Any]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__lowerCamelCase : Dict = []
for i in range(len(UpperCAmelCase ) ):
try:
__lowerCamelCase : int = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCAmelCase )
except UnicodeDecodeError:
# skip ids that do not decode to valid utf-8 on their own
continue
toks.append((i, tok) )
__lowerCamelCase : Any = list(filter(lambda UpperCAmelCase : re.match(r"^[ a-zA-Z]+$" , t[1] ) , UpperCAmelCase ) )
__lowerCamelCase : str = list(filter(lambda UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCAmelCase ) , UpperCAmelCase ) )
if max_length is not None and len(UpperCAmelCase ) > max_length:
__lowerCamelCase : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCAmelCase ) < min_length and len(UpperCAmelCase ) > 0:
while len(UpperCAmelCase ) < min_length:
__lowerCamelCase : int = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCamelCase : str = [t[0] for t in toks]
# Ensure consistency
__lowerCamelCase : Optional[int] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
if " " not in output_txt and len(UpperCAmelCase ) > 1:
__lowerCamelCase : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCAmelCase )
)
if with_prefix_space:
__lowerCamelCase : Optional[int] = " " + output_txt
__lowerCamelCase : List[Any] = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
return output_txt, output_ids
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = self.perceiver_tokenizer
__lowerCamelCase : Optional[int] = "Unicode €."
__lowerCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase )
__lowerCamelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : List[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]Unicode €.[SEP]" )
__lowerCamelCase : Optional[Any] = tokenizer("e è é ê ë" )
__lowerCamelCase : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Dict = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__lowerCamelCase : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__lowerCamelCase : Dict = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
__lowerCamelCase : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__lowerCamelCase : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowerCamelCase : str = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , UpperCAmelCase )
self.assertIn("attention_mask" , UpperCAmelCase )
self.assertNotIn("decoder_input_ids" , UpperCAmelCase )
self.assertNotIn("decoder_attention_mask" , UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = [
"Summary of the text.",
"Another summary.",
]
__lowerCamelCase : int = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="max_length" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCamelCase__ ( self : str ):
# safety check on the model_max_length default value so we are sure the test works
__lowerCamelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__lowerCamelCase : Any = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Tuple = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
__lowerCamelCase : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Dict = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
__lowerCamelCase : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__lowerCamelCase : Tuple = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Optional[int] = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Any = json.load(UpperCAmelCase )
__lowerCamelCase : Tuple = [F"""<extra_id_{i}>""" for i in range(125 )]
__lowerCamelCase : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
__lowerCamelCase : Any = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCamelCase : List[Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCAmelCase )]
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def lowerCamelCase__ ( self : List[str] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
# The default common tokenizer tests use tokens that are invalid for Perceiver, which can only accept
# one-character strings and special added tokens as tokens
__lowerCamelCase : List[str] = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
__lowerCamelCase : Any = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
| 135
| 1
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
UpperCAmelCase = (
("""layer.""", """layer_"""),
("""word_embeddings.weight""", """word_embeddings"""),
("""position_embeddings.weight""", """position_embeddings"""),
("""token_type_embeddings.weight""", """token_type_embeddings"""),
(""".""", """/"""),
("""LayerNorm/weight""", """LayerNorm/gamma"""),
("""LayerNorm/bias""", """LayerNorm/beta"""),
("""weight""", """kernel"""),
)
if not os.path.isdir(_snake_case ):
os.makedirs(_snake_case )
UpperCAmelCase = model.state_dict()
def to_tf_var_name(_snake_case ):
for patt, repl in iter(_snake_case ):
UpperCAmelCase = name.replace(_snake_case , _snake_case )
return F'''bert/{name}'''
def create_tf_var(_snake_case , _snake_case , _snake_case ):
UpperCAmelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase = tf.get_variable(dtype=_snake_case , shape=tensor.shape , name=_snake_case , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(_snake_case )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase = to_tf_var_name(_snake_case )
UpperCAmelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase = torch_tensor.T
UpperCAmelCase = create_tf_var(tensor=_snake_case , name=_snake_case , session=_snake_case )
tf.keras.backend.set_value(_snake_case , _snake_case )
UpperCAmelCase = session.run(_snake_case )
print(F'''Successfully created {tf_name}: {np.allclose(_snake_case , _snake_case )}''' )
UpperCAmelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(_snake_case , os.path.join(_snake_case , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
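# Why tensors_to_transpose above: torch.nn.Linear stores its weight as
# (out_features, in_features), while a TF1 dense kernel is laid out as
# (in_features, out_features), so those matrices must be transposed.
# A minimal check (illustrative only):
#
#     import torch
#     lin = torch.nn.Linear(3, 5)
#     assert lin.weight.shape == (5, 3)    # PyTorch layout: (out, in)
#     assert lin.weight.T.shape == (3, 5)  # TF kernel layout: (in, out)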
def _a ( _snake_case=None ):
"""simple docstring"""
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=_snake_case , required=_snake_case , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=_snake_case , default=_snake_case , required=_snake_case , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=_snake_case , required=_snake_case , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=_snake_case , required=_snake_case , help="""Directory in which to save tensorflow model""" )
UpperCAmelCase = parser.parse_args(_snake_case )
UpperCAmelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=_snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 367
|
"""simple docstring"""
import numpy as np
def _a ( _snake_case ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
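# Worked example (values rounded): the sigmoid maps any real input into (0, 1);
# e.g. np.array([-1.0, 0.0, 1.0]) -> approximately [0.2689, 0.5, 0.7311],
# since 1 / (1 + e**1) ~= 0.2689 and 1 / (1 + e**-1) ~= 0.7311.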
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234
| 0
|
def _A ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : Optional[int] =[int(SCREAMING_SNAKE_CASE ) for i in ip_va_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 255 for octet in octets )
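# Examples (using the 0-255 octet range above):
#   "192.168.0.1"     -> valid   (four in-range octets)
#   "255.255.255.255" -> valid   (255 is the maximum octet value)
#   "256.1.1.1"       -> invalid (256 is out of range)
#   "1.2.3"           -> invalid (only three octets)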
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = input().strip()
UpperCAmelCase : Optional[int] = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 95
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :str = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Union[str, Any] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _a ( _lowerCAmelCase , _lowerCAmelCase ):
A = 1
@register_to_config
def __init__(self, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=20, SCREAMING_SNAKE_CASE_=1E-3 ) -> str:
UpperCAmelCase_: List[str] = None
UpperCAmelCase_: str = None
UpperCAmelCase_: List[str] = None
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Optional[Any]:
UpperCAmelCase_: Dict = torch.linspace(1, self.config.sampling_eps, SCREAMING_SNAKE_CASE_, device=SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
UpperCAmelCase_: Any = (
-0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
UpperCAmelCase_: List[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
UpperCAmelCase_: Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
UpperCAmelCase_: Dict = std.unsqueeze(-1 )
UpperCAmelCase_: List[str] = -score / std
# compute the drift and diffusion terms of the reverse-time SDE step
UpperCAmelCase_: Any = -1.0 / len(self.timesteps )
UpperCAmelCase_: List[Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
UpperCAmelCase_: Tuple = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
UpperCAmelCase_: List[str] = beta_t.unsqueeze(-1 )
UpperCAmelCase_: str = -0.5 * beta_t * x
UpperCAmelCase_: Dict = torch.sqrt(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = drift - diffusion**2 * score
UpperCAmelCase_: List[Any] = x + drift * dt
# add noise
UpperCAmelCase_: int = randn_tensor(x.shape, layout=x.layout, generator=SCREAMING_SNAKE_CASE_, device=x.device, dtype=x.dtype )
UpperCAmelCase_: List[str] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__(self ) -> Tuple:
return self.config.num_train_timesteps
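# The step above is an Euler-Maruyama discretization of the reverse-time VP SDE.
# A sketch of the update it performs (dt is negative, z ~ N(0, I)):
#
#     score  = -model_output / std(t)
#     drift  = -0.5 * beta(t) * x - beta(t) * score
#     x_mean = x + drift * dt                            # deterministic part
#     x      = x_mean + sqrt(beta(t)) * sqrt(-dt) * z    # stochastic part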
| 82
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a : Optional[Any] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : Dict = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a : str = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
a : Optional[int] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = SqueezeBertTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="[UNK]", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[PAD]", SCREAMING_SNAKE_CASE_="[CLS]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> int:
super().__init__(
SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, do_lower_case=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, tokenize_chinese_chars=SCREAMING_SNAKE_CASE_, strip_accents=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCAmelCase_: Optional[Any] = getattr(SCREAMING_SNAKE_CASE_, normalizer_state.pop("""type""" ) )
UpperCAmelCase_: Optional[Any] = do_lower_case
UpperCAmelCase_: int = strip_accents
UpperCAmelCase_: int = tokenize_chinese_chars
UpperCAmelCase_: List[Any] = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = do_lower_case
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: List[Any] = [self.sep_token_id]
UpperCAmelCase_: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
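# Illustration of the mask above (token counts are hypothetical): for a pair of
# sequences A (3 tokens) and B (2 tokens) the layout and token_type_ids are
#     [CLS] A A A [SEP] B B [SEP]
#       0   0 0 0   0   1 1   1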
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCAmelCase_: Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 82
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCAmelCase_ : Optional[Any] = '''.'''
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCAmelCase_ : Any = line.strip()
UpperCAmelCase_ : int = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCAmelCase_ : Union[str, Any] = '''\n'''.join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 38
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=1_3 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=9_9 , UpperCAmelCase : Optional[int]=3_2 , UpperCAmelCase : Dict=5 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=3_7 , UpperCAmelCase : List[str]="gelu" , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=5_1_2 , UpperCAmelCase : Dict=1_6 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=0.02 , UpperCAmelCase : List[Any]=4 , ) -> Optional[Any]:
__lowerCAmelCase: str = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Optional[int] = seq_length
__lowerCAmelCase: Dict = is_training
__lowerCAmelCase: Optional[Any] = use_attention_mask
__lowerCAmelCase: List[Any] = use_token_type_ids
__lowerCAmelCase: Optional[int] = use_labels
__lowerCAmelCase: Optional[Any] = vocab_size
__lowerCAmelCase: Optional[Any] = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: List[str] = num_attention_heads
__lowerCAmelCase: int = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: List[Any] = hidden_dropout_prob
__lowerCAmelCase: List[str] = attention_probs_dropout_prob
__lowerCAmelCase: Optional[int] = max_position_embeddings
__lowerCAmelCase: Union[str, Any] = type_vocab_size
__lowerCAmelCase: int = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: Any = num_choices
def UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase: List[Any] = None
if self.use_attention_mask:
__lowerCAmelCase: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase: Optional[Any] = None
if self.use_token_type_ids:
__lowerCAmelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase: Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase ( self : Dict ) -> Any:
__lowerCAmelCase: Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = config_and_inputs
__lowerCAmelCase: Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( snake_case__ , unittest.TestCase ):
_lowercase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase: List[Any] = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase ( self : Tuple ) -> Dict:
for model_class_name in self.all_model_classes:
__lowerCAmelCase: Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
@require_flax
class A_ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self : Any ) -> Any:
__lowerCAmelCase: List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
__lowerCAmelCase: Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase: Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase: Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
__lowerCAmelCase: str = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase )
__lowerCAmelCase: List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1E-4 ) )
| 322
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : Any = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch', 'scipy']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
| 9
| 0
|
'''simple docstring'''
_A : Any ={'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
_A : Optional[Any] =['''a''', '''b''', '''c''', '''d''', '''e''']
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
lowerCamelCase__ : str = start
# add current to visited
visited.append(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowerCamelCase__ : str = topological_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# once all neighbors are visited, add the current vertex to the sort
sort.append(UpperCamelCase )
# if not all vertices have been visited yet, select an unvisited one and continue
if len(UpperCamelCase ) != len(UpperCamelCase ):
for vertice in vertices:
if vertice not in visited:
lowerCamelCase__ : Union[str, Any] = topological_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# return sort
return sort
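# Hand trace on the module-level graph above: starting from "a", the DFS first
# fully explores "c" (no neighbors), then "b" -> "d" -> "e", appending each
# vertex only after all of its neighbors, so the script prints
# ['c', 'd', 'e', 'b', 'a'].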
if __name__ == "__main__":
_A : Optional[Any] =topological_sort('''a''', [], [])
print(sort)
| 41
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple ="naver-clova-ix/donut-base-finetuned-docvqa"
SCREAMING_SNAKE_CASE_ : Dict =(
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
SCREAMING_SNAKE_CASE_ : List[str] ="document_qa"
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor
SCREAMING_SNAKE_CASE_ : Union[str, Any] =VisionEncoderDecoderModel
SCREAMING_SNAKE_CASE_ : List[Any] =["image", "text"]
SCREAMING_SNAKE_CASE_ : Any =["text"]
def __init__( self : Optional[int] , *__A : List[str] , **__A : List[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*__A , **__A )
def _lowerCamelCase ( self : Any , __A : "Image" , __A : str ):
__UpperCamelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__UpperCamelCase = task_prompt.replace('{user_input}' , __A )
__UpperCamelCase = self.pre_processor.tokenizer(
__A , add_special_tokens=__A , return_tensors='pt' ).input_ids
__UpperCamelCase = self.pre_processor(__A , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _lowerCamelCase ( self : Union[str, Any] , __A : Optional[Any] ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__A , ).sequences
def _lowerCamelCase ( self : Tuple , __A : List[Any] ):
__UpperCamelCase = self.pre_processor.batch_decode(__A )[0]
__UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
__UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
__UpperCamelCase = re.sub(R'<.*?>' , '' , __A , count=1 ).strip() # remove first task start token
__UpperCamelCase = self.pre_processor.tokenajson(__A )
return sequence["answer"]
| 53
| 0
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase : Tuple = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
lowerCAmelCase : int = []
lowerCAmelCase : Any = []
lowerCAmelCase : List[Any] = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
lowerCAmelCase : Any = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
lowerCAmelCase : List[str] = 0
for log in Path().glob('*.log'):
lowerCAmelCase : List[str] = 0
with open(log, 'r') as f:
for line in f:
lowerCAmelCase : Dict = json.loads(line)
if line.get('nodeid', '') != "":
lowerCAmelCase : Tuple = line['nodeid']
if line.get('duration', None) is not None:
lowerCAmelCase : List[str] = f"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCAmelCase : Optional[Any] = []
log.unlink()
lowerCAmelCase : int = ''
lowerCAmelCase : Tuple = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase : Any = []
lowerCAmelCase : List[str] = {}
for test in failed_tests:
lowerCAmelCase : int = test[0].split('::')
lowerCAmelCase : Optional[int] = data[0].split('/')[-1]
if data[0] not in filesafailed:
lowerCAmelCase : Tuple = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase : Optional[int] = [test[0] for test in failed_table]
lowerCAmelCase : int = list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase : List[Any] = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase : int = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
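# Slack section blocks cap their text at 3000 characters, so truncate the
# report and append a short notice when the full table would not fit.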
if len(message) > 30_00:
lowerCAmelCase : Any = 'Too many failed tests, please see the full report in the Action results.'
lowerCAmelCase : int = len(err) + 10
lowerCAmelCase : Tuple = message[: 30_00 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
lowerCAmelCase : List[Any] = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
lowerCAmelCase : List[str] = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
lowerCAmelCase : Tuple = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
lowerCAmelCase : Optional[int] = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
lowerCAmelCase : Optional[int] = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCAmelCase : List[Any] = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
lowerCAmelCase : str = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first occurrence of each test class name
lowerCAmelCase : str = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase : Optional[Any] = row[0]
else:
lowerCAmelCase : int = ''
lowerCAmelCase : int = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 251
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def A_( A : List[Any]):
UpperCamelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A , A)
def A_( A : Any):
UpperCamelCase = list(s_dict.keys())
for key in keys:
if "transformer_layers" in key:
UpperCamelCase = s_dict.pop(A)
elif "subsample" in key:
UpperCamelCase = s_dict.pop(A)
def A_( A : Optional[int]):
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(A , A , bias=A)
UpperCamelCase = emb.weight.data
return lin_layer
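# What make_linear_from_emb does above (illustrative shapes): an embedding table
# of shape (vocab_size, d_model) is turned into a bias-free Linear(d_model,
# vocab_size) that shares the same weight matrix, so the LM head computes
# logits = hidden_states @ embed_tokens.weight.T without duplicating parameters.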
def A_( A : Optional[int] , A : List[str]):
UpperCamelCase = torch.load(A , map_location='cpu')
UpperCamelCase = mam_aaa['args']
UpperCamelCase = mam_aaa['model']
UpperCamelCase = state_dict['decoder.output_projection.weight']
remove_ignore_keys_(A)
rename_keys(A)
UpperCamelCase = state_dict['decoder.embed_tokens.weight'].shape[0]
UpperCamelCase = args.share_decoder_input_output_embed
UpperCamelCase = [int(A) for i in args.conv_kernel_sizes.split(',')]
UpperCamelCase = SpeechaTextConfig(
vocab_size=A , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(A) , conv_channels=args.conv_channels , conv_kernel_sizes=A , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=A , num_beams=5 , max_length=200 , use_cache=A , decoder_start_token_id=2 , early_stopping=A , )
UpperCamelCase = SpeechaTextForConditionalGeneration(A)
UpperCamelCase , UpperCamelCase = model.model.load_state_dict(A , strict=A)
if len(A) > 0 and not set(A) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
f''' but all the following weights are missing {missing}''')
if tie_embeds:
UpperCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
UpperCamelCase = lm_head_weights
model.save_pretrained(A)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase : List[str] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 251
| 1
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
__a = input_paths_and_base_extractors[compression_format]
if input_path is None:
__a = f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowerCAmelCase )
assert base_extractor.is_extractable(__lowerCAmelCase )
__a = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__lowerCAmelCase , __lowerCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
__a = file_path.read_text(encoding='''utf-8''' )
else:
__a = output_path.read_text(encoding='''utf-8''' )
__a = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_extractor_does_not_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
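# For reference, a minimal sketch of the magic-number check this test relies on.
# This is an assumption about how `ZipExtractor.is_extractable` avoids the false
# positive, not the actual implementation:
#
#     ZIP_MAGIC_NUMBER = b"PK\x03\x04"  # local file header signature
#
#     def looks_like_zip(path) -> bool:
#         with open(path, "rb") as f:
#             return f.read(len(ZIP_MAGIC_NUMBER)) == ZIP_MAGIC_NUMBER
#
# `zipfile.is_zipfile` instead scans for an end-of-central-directory record
# ("PK\x05\x06"), which the PNG payload above deliberately contains.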
"""Compute the speed of sound in a fluid from its bulk modulus and density."""


def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Speed of sound in a fluid: v = sqrt(bulk_modulus / density).

    Example with water (density ~998 kg/m^3, bulk modulus ~2.15 GPa):

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 1)
    1467.8
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
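# A minimal predictor-corrector sampling sketch using the scheduler above
# (assumed usage; `score_model` is a hypothetical network returning a tensor
# shaped like `sample`):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=100)
#     scheduler.set_sigmas(num_inference_steps=100)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(score_model(sample, t), sample).prev_sample
#         sample = scheduler.step_pred(score_model(sample, t), t, sample).prev_sample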
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
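# Worked example of the functions above: a term appearing 3 times in one
# document (tf = 3) and in 1 of 10 corpus documents (df = 1, n = 10) gives
# idf = round(log10(10 / 1), 3) = 1.0 and tf-idf = round(3 * 1.0, 3) = 3.0:
#
#     assert inverse_document_frequency(1, 10) == 1.0
#     assert tf_idf(3, 1.0) == 3.0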
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
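# Typical pipeline usage exercised by the tests above (same tiny test
# checkpoint; labels and scores are model-dependent):
#
#     classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     classifier("This is great !")               # -> [{"label": ..., "score": ...}]
#     classifier("This is great !", top_k=None)   # -> scores for every label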
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
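# Worked example: 585 is counted by `solution`, since it is a palindrome both
# in base 10 and in base 2 (585 = 0b1001001001):
#
#     assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])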
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
assert isinstance(A__ , A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
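# For context, the round trip the tests above exercise can also be done through
# the public `Dataset` API (a sketch; the file path is illustrative):
#
#     from datasets import Dataset
#     dset = Dataset.from_dict({"col_1": ["0"], "col_2": [0], "col_3": [0.0]})
#     dset.to_sql("dataset", "sqlite:///my.db")                 # write
#     dset2 = Dataset.from_sql("dataset", "sqlite:///my.db")    # read back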
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor based on Kaldi-style mel-filter bank features.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
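# Minimal usage sketch (values follow the defaults of the class above; any mono
# 16 kHz float waveform works):
#
#     import numpy as np
#     extractor = Speech2TextFeatureExtractor()
#     waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence
#     batch = extractor(waveform, sampling_rate=16000, return_tensors="np")
#     batch["input_features"].shape  # (1, num_frames, num_mel_bins)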
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""PyTorch MobileNetV1 model."""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'

# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.')
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}')
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping')
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')

        logger.info(f'Initialize PyTorch weight {name} {array.shape}')
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + '/RMSProp', None)
        tf_weights.pop(name + '/RMSProp_1', None)
        tf_weights.pop(name + '/ExponentialMovingAverage', None)

    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}')
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, 'constant', 0.0)
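# Worked example of the "SAME" padding rule above: for in_height = 7, stride = 2,
# kernel = 3, we have 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2,
# split as pad_top = 1 and pad_bottom = 1, matching TensorFlow's behavior.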
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: bool = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
        if out_channels % groups != 0:
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='zeros',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(hidden_states), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [hidden_states, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=hidden_states,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> None:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(A_ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=A_ )
UpperCamelCase = nn.Linear(A_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(A_ , output_hidden_states=A_ , return_dict=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(A_ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = 'single_label_classification'
else:
UpperCamelCase = 'multi_label_classification'
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(A_ , A_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(A_ , A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=A_ , logits=A_ , hidden_states=outputs.hidden_states , )
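# Minimal usage sketch for the classification model above, assuming the public
# "google/mobilenet_v1_1.0_224" checkpoint and a hypothetical local image file.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    classifier = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
    with torch.no_grad():
        logits = classifier(**inputs).logits
    print(classifier.config.id2label[logits.argmax(-1).item()])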
| 251
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL sequence-classification head into the HF model."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL audio-frame-classification (diarization) head into the HF model."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an S3PRL x-vector (speaker verification) head into the HF model."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL checkpoint to a transformers checkpoint and save it to disk."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
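# Invocation sketch (hypothetical paths and checkpoint names — adjust to your setup):
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model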
| 251
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined as a weighted sum of a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Return the values of the Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Return the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plot the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
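# A quick numeric check of the curve evaluation (no plotting): the quadratic
# through the control points below passes through (3.75, 2.5) at t = 0.5.
if __name__ == "__main__":
    quadratic = BezierCurve([(0, 0), (5, 5), (5, 0)])
    print(quadratic.bezier_curve_function(0.5))  # (3.75, 2.5)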
| 251
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
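# Minimal sketch of the template in use (hypothetical column name):
if __name__ == "__main__":
    template = LanguageModeling(text_column="content")
    print(template.column_mapping)  # {'content': 'text'}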
| 304
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
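# Quick sanity check of the structures above (commented out so the script's
# printed output stays unchanged):
#
#   sll = SortedLinkedList([3, 1, 2])
#   print(sll)       # 1 -> 2 -> 3
#   print(len(sll))  # 3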
| 304
| 1
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r' ) as src_file, open(args.evaluation_set, 'w' ) as eval_file, open(
        args.gold_data_path, 'w' ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n' )
            gold_file.write('\t'.join(contexts) + '\n' )
if __name__ == "__main__":
main()
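# Shape of the records the script expects (field names as used above; the
# values here are invented for illustration):
#
#   {"question": "who wrote hamlet?",
#    "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}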
| 213
|
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
    '''library. You can have a look at this example script for pointers: '''
    '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
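# Usage sketch with made-up predictions:
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))
    # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}  (up to float rounding)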
| 22
| 0
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, using Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8' ) as merges_handle:
            merges = merges_handle.read().split('\n' )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Build model inputs by adding special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """PhoBERT does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = """@@ """.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ' ) ) )
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token )

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (string) to a single string."""
        out_string = """ """.join(tokens).replace('@@ ', '' ).strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )

        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file, out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str ):
            try:
                with open(f, 'r', encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ' )
            if idx == -1:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
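# Usage sketch, assuming the public "vinai/phobert-base" checkpoint and
# word-segmented Vietnamese input (PhoBERT expects pre-segmented text):
#
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   print(tokenizer.tokenize("Tôi là sinh_viên"))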
| 363
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast' )}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download )

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path, checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name )
                    checkpoint_prefix_name = None

                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name )
            logger.info(f"""=> File names {file_names}""" )

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
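# Invocation sketch (hypothetical output directory):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --dump_path ./fast_tokenizers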
| 208
| 0
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk, dim=0 )
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
        predicted_token = predicted_token_bpe.replace('\u2581', ' ' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token ), predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
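# Each returned entry is a (filled_sentence, probability, predicted_token)
# triple, so the call above could also be unpacked as:
#
#   for filled, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
#       print(f"{prob:.3f}  {filled}")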
| 89
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ] )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
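# Shape sketch for the mapper with a tiny, made-up config:
if __name__ == "__main__":
    from transformers import CLIPVisionConfig

    cfg = CLIPVisionConfig(hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
    mapper = PaintByExampleMapper(cfg)
    print(mapper(torch.randn(2, 1, 32)).shape)  # torch.Size([2, 1, 32])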
| 326
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, 'run_glue_deebert.py')
            with patch.object(sys, 'argv', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            '.split()
        self.run_and_check(train_args)

        eval_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(eval_args)

        entro_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(entro_args)
| 362
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc(model_doc):
    """Remove duplicate entries and sort the model doc table of content alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower() )
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    # Extract the modalities and clean each one of them
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
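# Behaviour sketch for clean_model_doc_toc on made-up entries: duplicate
# "local" keys are collapsed and the result is sorted by title.
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   # -> [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]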
| 119
| 0
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x) )
        out = torch.flatten(out, start_dim=2 )
        out = out.transpose(1, 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data )

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'] ) ).convert('RGB' )
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'] )
        return label_freqs
def collate_fn(batch):
    """Pad sentences to the batch maximum and stack images, labels and image tokens."""
    lens = [len(row['sentence'] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lens )

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long )
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch, lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469], ),
        ] )
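# Shape sketch for the transform pipeline on a dummy image:
if __name__ == "__main__":
    tensor = get_image_transforms()(Image.new("RGB", (300, 300)))
    print(tensor.shape)  # torch.Size([3, 224, 224])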
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )

    def to_dict(self) -> Dict:
        """Serialize, replacing any `GenerationConfig` entry by its dict representation."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                d[k] = v.to_dict()
        return d
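# Usage sketch (hypothetical output directory):
if __name__ == "__main__":
    args = Seq2SeqTrainingArguments(output_dir="./out", predict_with_generate=True, generation_num_beams=4)
    print(args.generation_num_beams)  # 4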
| 342
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs )
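# Quick sketch of building a small config:
if __name__ == "__main__":
    config = RoCBertConfig(num_hidden_layers=2, hidden_size=128)
    print(config.model_type)  # roc_bert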
| 342
| 1
|
def triangle_number_generator():
    """Generate the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 100_0000 ):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500 )
if __name__ == "__main__":
print(solution())
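# Worked example for the divisor count: 28 = 2^2 * 7, so
# count_divisors(28) = (2 + 1) * (1 + 1) = 6 (divisors 1, 2, 4, 7, 14, 28).
#
#   print(count_divisors(28))  # 6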
| 302
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 303
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(self, args, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None, ):
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '''.lock'''
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
            else:
                logger.info(f'''Creating features from dataset file at {args.data_dir}''' )

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )

    def __len__(self):
        return len(self.features )

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 355
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__a = logging.get_logger(__name__)
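# Rewrite PyTorch-style parameter keys such as ``layers.0`` into the
# underscore form (``layers_0``) that Flax uses for submodule names.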
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = r'''\w+[.]\d+'''
UpperCAmelCase_ : int = re.findall(_lowercase , _lowercase )
for pat in pats:
UpperCAmelCase_ : List[Any] = key.replace(_lowercase , '''_'''.join(pat.split('''.''' ) ) )
return key
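# Rename a single PyTorch parameter (given as a tuple of key parts) to its
# Flax counterpart and reshape the tensor where the two frameworks disagree:
# layer-norm weights become ``scale``, embeddings become ``embedding``, conv
# kernels are transposed from OIHW to HWIO, and linear weights are transposed
# into Flax ``kernel`` layout.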
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCAmelCase_ : List[Any] = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCAmelCase_ : Tuple = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCAmelCase_ : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
UpperCAmelCase_ : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_ : Tuple = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
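# Convert a full PyTorch state dict into a nested Flax parameter dict, using
# a randomly initialised Flax model to validate the expected shapes.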
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=42 ):
'''simple docstring'''
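# Step 1: Convert the PyTorch tensors to numpy arrays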
UpperCAmelCase_ : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
UpperCAmelCase_ : str = flax_model.init_weights(PRNGKey(_lowercase ) )
UpperCAmelCase_ : List[Any] = flatten_dict(_lowercase )
UpperCAmelCase_ : int = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : Optional[int] = rename_key(_lowercase )
UpperCAmelCase_ : List[str] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
UpperCAmelCase_, UpperCAmelCase_ : Any = rename_key_and_reshape_tensor(_lowercase , _lowercase , _lowercase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : int = jnp.asarray(_lowercase )
return unflatten_dict(_lowercase )
| 235
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''b0''': efficientnet.EfficientNetB0,
'''b1''': efficientnet.EfficientNetB1,
'''b2''': efficientnet.EfficientNetB2,
'''b3''': efficientnet.EfficientNetB3,
'''b4''': efficientnet.EfficientNetB4,
'''b5''': efficientnet.EfficientNetB5,
'''b6''': efficientnet.EfficientNetB6,
'''b7''': efficientnet.EfficientNetB7,
}
lowerCAmelCase_ = {
'''b0''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_24,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 12_80,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_40,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 14_08,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_60,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 15_36,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_00,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 17_92,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_80,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 20_48,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_56,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 23_04,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_28,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 25_60,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_00,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
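# Build an EfficientNetConfig for the requested variant and attach the
# ImageNet-1k id2label mapping downloaded from the Hub.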
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = EfficientNetConfig()
snake_case_ = CONFIG_MAP[model_name]['''hidden_dim''']
snake_case_ = CONFIG_MAP[model_name]['''width_coef''']
snake_case_ = CONFIG_MAP[model_name]['''depth_coef''']
snake_case_ = CONFIG_MAP[model_name]['''image_size''']
snake_case_ = CONFIG_MAP[model_name]['''dropout_rate''']
snake_case_ = CONFIG_MAP[model_name]['''dw_padding''']
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = 1000
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(k): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ():
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = CONFIG_MAP[model_name]['''image_size''']
snake_case_ = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=SCREAMING_SNAKE_CASE__ , )
return preprocessor
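# Build the (TF variable name, HF parameter name) pairs covering the stem,
# every block (expansion, depthwise conv, squeeze-excite, projection) and the
# top conv/batch-norm layers.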
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
snake_case_ = sorted(set(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
snake_case_ = {b: str(i) for b, i in zip(SCREAMING_SNAKE_CASE__ , range(SCREAMING_SNAKE_CASE__ ) )}
snake_case_ = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
snake_case_ = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
snake_case_ = {}
for item in rename_keys:
if item[0] in original_param_names:
snake_case_ = '''efficientnet.''' + item[1]
snake_case_ = '''classifier.weight'''
snake_case_ = '''classifier.bias'''
return key_mapping
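# Copy TF weights into the HF state dict, skipping normalization keys and
# permuting conv kernels from HWIO to OIHW (depthwise kernels have their own
# permutation), while dense kernels are simply transposed.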
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for key, value in tf_params.items():
if "normalization" in key:
continue
snake_case_ = key_mapping[key]
if "_conv" in key and "kernel" in key:
snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
snake_case_ = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE__ ) )
else:
snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE__ )
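# End-to-end conversion: instantiate the original Keras model, map its
# weights onto the HF EfficientNet, and check that both models produce
# matching logits on a sample image before saving or pushing to the Hub.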
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = model_classes[model_name](
include_top=SCREAMING_SNAKE_CASE__ , weights='''imagenet''' , input_tensor=SCREAMING_SNAKE_CASE__ , input_shape=SCREAMING_SNAKE_CASE__ , pooling=SCREAMING_SNAKE_CASE__ , classes=1000 , classifier_activation='''softmax''' , )
snake_case_ = original_model.trainable_variables
snake_case_ = original_model.non_trainable_variables
snake_case_ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
snake_case_ = param.numpy()
snake_case_ = list(tf_params.keys() )
# Load HuggingFace model
snake_case_ = get_efficientnet_config(SCREAMING_SNAKE_CASE__ )
snake_case_ = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
snake_case_ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
snake_case_ = rename_keys(SCREAMING_SNAKE_CASE__ )
replace_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Initialize preprocessor and preprocess input image
snake_case_ = convert_image_processor(SCREAMING_SNAKE_CASE__ )
snake_case_ = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
snake_case_ = hf_model(**SCREAMING_SNAKE_CASE__ )
snake_case_ = outputs.logits.detach().numpy()
# Original model inference
snake_case_ = False
snake_case_ = CONFIG_MAP[model_name]['''image_size''']
snake_case_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
snake_case_ = image.img_to_array(SCREAMING_SNAKE_CASE__ )
snake_case_ = np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=0 )
snake_case_ = original_model.predict(SCREAMING_SNAKE_CASE__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
os.mkdir(SCREAMING_SNAKE_CASE__ )
# Save converted model and image processor
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
preprocessor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
snake_case_ = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(SCREAMING_SNAKE_CASE__ )
hf_model.push_to_hub(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 8
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_UpperCamelCase = data_utils.TransfoXLTokenizer
_UpperCamelCase = data_utils.TransfoXLCorpus
_UpperCamelCase = data_utils
_UpperCamelCase = data_utils
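# Convert either a pre-processed Transformer-XL corpus pickle (vocabulary and
# dataset caches) or a pre-trained TensorFlow checkpoint into PyTorch format.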
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> Optional[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_lowerCAmelCase ,'rb' ) as fp:
__lowerCamelCase : Optional[Any] = pickle.load(_lowerCAmelCase ,encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCamelCase : Tuple = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
__lowerCamelCase : str = corpus.vocab.__dict__
torch.save(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' ,_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_lowerCAmelCase ,_lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCamelCase : int = os.path.abspath(_lowerCAmelCase )
__lowerCamelCase : Any = os.path.abspath(_lowerCAmelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCamelCase : Optional[int] = TransfoXLConfig()
else:
__lowerCamelCase : Optional[int] = TransfoXLConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
__lowerCamelCase : List[str] = TransfoXLLMHeadModel(_lowerCAmelCase )
__lowerCamelCase : Dict = load_tf_weights_in_transfo_xl(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Save pytorch-model
__lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : int = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(_lowerCAmelCase )}' )
torch.save(model.state_dict() ,_lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(_lowerCAmelCase )}' )
with open(_lowerCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_UpperCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 208
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCamelCase = logging.getLogger(__name__)
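# Fine-tuning script for sequence-to-sequence models (summarization or
# translation): argument dataclasses below, followed by the main
# training/evaluation entry point.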
@dataclass
class __UpperCAmelCase :
__snake_case : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
__snake_case : bool = field(default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the encoder."} )
__snake_case : bool = field(default=_UpperCAmelCase ,metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class __UpperCAmelCase :
__snake_case : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__snake_case : Optional[str] = field(
default="summarization" ,metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,)
__snake_case : Optional[int] = field(
default=1024 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__snake_case : Optional[int] = field(
default=128 ,metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__snake_case : Optional[int] = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} ,)
__snake_case : Optional[int] = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__snake_case : Optional[int] = field(default=-1 ,metadata={"help": "# training examples. -1 means use all."} )
__snake_case : Optional[int] = field(default=-1 ,metadata={"help": "# validation examples. -1 means use all."} )
__snake_case : Optional[int] = field(default=-1 ,metadata={"help": "# test examples. -1 means use all."} )
__snake_case : Optional[str] = field(default=_UpperCAmelCase ,metadata={"help": "Source language id for translation."} )
__snake_case : Optional[str] = field(default=_UpperCAmelCase ,metadata={"help": "Target language id for translation."} )
__snake_case : Optional[int] = field(default=_UpperCAmelCase ,metadata={"help": "# num_beams to use for evaluation."} )
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,)
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> List[Any]:
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(snake_case__ ,os.path.join(snake_case__ ,F'{split}_results.json' ) )
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
check_output_dir(snake_case__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" ,snake_case__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(snake_case__ ,snake_case__ ,snake_case__ ):
assert hasattr(snake_case__ ,snake_case__ ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(snake_case__ ,snake_case__ ,getattr(snake_case__ ,snake_case__ ) )
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path ,from_tf=""".ckpt""" in model_args.model_name_or_path ,config=snake_case__ ,cache_dir=model_args.cache_dir ,)
# use task specific params
use_task_specific_params(snake_case__ ,data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_SCREAMING_SNAKE_CASE = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case__ ,(MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_SCREAMING_SNAKE_CASE = SeqaSeqDataset
# Get datasets
_SCREAMING_SNAKE_CASE = (
dataset_class(
snake_case__ ,type_path="""train""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE = (
dataset_class(
snake_case__ ,type_path="""val""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_SCREAMING_SNAKE_CASE = (
dataset_class(
snake_case__ ,type_path="""test""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_predict
else None
)
# Initialize our Trainer
_SCREAMING_SNAKE_CASE = (
build_compute_metrics_fn(data_args.task ,snake_case__ ) if training_args.predict_with_generate else None
)
_SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=snake_case__ ,args=snake_case__ ,data_args=snake_case__ ,train_dataset=snake_case__ ,eval_dataset=snake_case__ ,data_collator=SeqaSeqDataCollator(
snake_case__ ,snake_case__ ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=snake_case__ ,tokenizer=snake_case__ ,)
_SCREAMING_SNAKE_CASE = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_SCREAMING_SNAKE_CASE = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_SCREAMING_SNAKE_CASE = train_result.metrics
_SCREAMING_SNAKE_CASE = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" ,snake_case__ ,training_args.output_dir )
all_metrics.update(snake_case__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix="""val""" )
_SCREAMING_SNAKE_CASE = data_args.n_val
_SCREAMING_SNAKE_CASE = round(metrics["""val_loss"""] ,4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" ,snake_case__ ,training_args.output_dir )
all_metrics.update(snake_case__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=snake_case__ ,metric_key_prefix="""test""" )
_SCREAMING_SNAKE_CASE = test_output.metrics
_SCREAMING_SNAKE_CASE = data_args.n_test
if trainer.is_world_process_zero():
_SCREAMING_SNAKE_CASE = round(metrics["""test_loss"""] ,4 )
handle_metrics("""test""" ,snake_case__ ,training_args.output_dir )
all_metrics.update(snake_case__ )
if training_args.predict_with_generate:
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
test_output.predictions ,skip_special_tokens=snake_case__ ,clean_up_tokenization_spaces=snake_case__ )
_SCREAMING_SNAKE_CASE = lmap(str.strip ,snake_case__ )
write_txt_file(snake_case__ ,os.path.join(training_args.output_dir ,"""test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(snake_case__ ,os.path.join(training_args.output_dir ,"""all_results.json""" ) )
return all_metrics
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 125
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCamelCase = get_logger(__name__)
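# Download manager used in dataset tests: instead of fetching real data it
# resolves URLs to files inside a small ``dummy_data.zip`` archive, taken
# either from the local repository checkout or from GitHub.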
class __UpperCAmelCase :
__snake_case : Tuple = "dummy_data"
__snake_case : List[Any] = "datasets"
__snake_case : List[Any] = False
def __init__( self: Any , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: Union[Version, str] , UpperCAmelCase_: Optional[str] = None , UpperCAmelCase_: bool = False , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[List[Callable]] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = dataset_name
_SCREAMING_SNAKE_CASE = cache_dir
_SCREAMING_SNAKE_CASE = use_local_dummy_data
_SCREAMING_SNAKE_CASE = config
# download_callbacks take a single url as input
_SCREAMING_SNAKE_CASE = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_SCREAMING_SNAKE_CASE = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_SCREAMING_SNAKE_CASE = str(UpperCAmelCase_ )
# to be downloaded
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
if self._dummy_file is None:
_SCREAMING_SNAKE_CASE = self.download_dummy_data()
return self._dummy_file
@property
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_SCREAMING_SNAKE_CASE = cached_path(
UpperCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCAmelCase_ , force_extract=UpperCAmelCase_ )
return os.path.join(UpperCAmelCase_ , self.dummy_file_name )
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
if self._bucket_url is None:
_SCREAMING_SNAKE_CASE = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def UpperCamelCase ( self: str , UpperCAmelCase_: str , *UpperCAmelCase_: Dict ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_SCREAMING_SNAKE_CASE = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_SCREAMING_SNAKE_CASE = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return self.create_dummy_data_dict(UpperCAmelCase_ , UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCAmelCase_ , UpperCAmelCase_ )
else:
return self.create_dummy_data_single(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self: int , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Any ):
'''simple docstring'''
return self.download_and_extract(UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: int , UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
return self.download_and_extract(UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Dict , *UpperCAmelCase_: Tuple , **UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
return path
def UpperCamelCase ( self: str ):
'''simple docstring'''
return {}
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Dict , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
for single_url in single_urls:
download_callback(UpperCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE = single_urls
download_callback(UpperCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [os.path.join(UpperCAmelCase_ , urllib.parse.quote_plus(Path(UpperCAmelCase_ ).name ) ) for x in single_urls]
else:
_SCREAMING_SNAKE_CASE = single_urls
_SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase_ , urllib.parse.quote_plus(Path(UpperCAmelCase_ ).name ) )
_SCREAMING_SNAKE_CASE = value
# make sure that values are unique
if all(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_SCREAMING_SNAKE_CASE = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCamelCase ( self: int , UpperCAmelCase_: Dict , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_SCREAMING_SNAKE_CASE = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCAmelCase_ ) ) for url in data_url )
_SCREAMING_SNAKE_CASE = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_SCREAMING_SNAKE_CASE = [data_url[0]] * len(UpperCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(UpperCAmelCase_ )
return dummy_data_list
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Dict , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(UpperCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_SCREAMING_SNAKE_CASE = os.path.join(UpperCAmelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(UpperCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
def _iter_archive_members(UpperCAmelCase_: Any ):
# this preserves the order of the members inside the ZIP archive
_SCREAMING_SNAKE_CASE = Path(self.dummy_file ).parent
_SCREAMING_SNAKE_CASE = path.relative_to(UpperCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_SCREAMING_SNAKE_CASE = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = Path(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = _iter_archive_members(UpperCAmelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(UpperCAmelCase_ ).as_posix(), file_path.open("""rb""" )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = [paths]
for path in paths:
if os.path.isfile(UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(UpperCAmelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
| 125
| 1
|
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__SCREAMING_SNAKE_CASE : Any = """bert-base-cased"""
__SCREAMING_SNAKE_CASE : Any = """fp16"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """bf16"""
__SCREAMING_SNAKE_CASE : Dict = [FPaa, BFaa]
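# The tests below drive FullyShardedDataParallelPlugin purely through mocked
# environment variables; each one checks that a launcher setting maps to the
# matching torch.distributed.fsdp configuration object.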
@require_fsdp
@require_cuda
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def _A ( self : Union[str, Any] ):
super().setUp()
_UpperCAmelCase : Optional[int] = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def _A ( self : Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(A ):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Any = F"""{i + 1}"""
_UpperCAmelCase : str = strategy
with mockenv_context(**A ):
_UpperCAmelCase : Any = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _A ( self : Any ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(A ):
_UpperCAmelCase : Union[str, Any] = self.dist_env.copy()
_UpperCAmelCase : Optional[Any] = prefetch_policy
with mockenv_context(**A ):
_UpperCAmelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _A ( self : int ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(A ):
_UpperCAmelCase : Optional[int] = self.dist_env.copy()
_UpperCAmelCase : Tuple = state_dict_type
with mockenv_context(**A ):
_UpperCAmelCase : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def _A ( self : List[str] ):
_UpperCAmelCase : List[str] = AutoModel.from_pretrained(A )
for policy in FSDP_AUTO_WRAP_POLICY:
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_UpperCAmelCase : Optional[Any] = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
_UpperCAmelCase : Any = "2000"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_UpperCAmelCase : Any = self.dist_env.copy()
_UpperCAmelCase : Tuple = "TRANSFORMER_BASED_WRAP"
_UpperCAmelCase : Tuple = "T5Layer"
with mockenv_context(**A ):
_UpperCAmelCase : Dict = FullyShardedDataParallelPlugin()
with self.assertRaises(A ) as cm:
fsdp_plugin.set_auto_wrap_policy(A )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : Optional[int] = "SIZE_BASED_WRAP"
_UpperCAmelCase : List[str] = "0"
with mockenv_context(**A ):
_UpperCAmelCase : List[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(A )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _A ( self : List[str] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : str = mp_dtype
with mockenv_context(**A ):
_UpperCAmelCase : int = Accelerator()
if mp_dtype == "fp16":
_UpperCAmelCase : Union[str, Any] = torch.float16
elif mp_dtype == "bf16":
_UpperCAmelCase : Union[str, Any] = torch.bfloat16
_UpperCAmelCase : Optional[int] = MixedPrecision(param_dtype=A , reduce_dtype=A , buffer_dtype=A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(A )
def _A ( self : Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_UpperCAmelCase : str = self.dist_env.copy()
_UpperCAmelCase : Dict = str(A ).lower()
with mockenv_context(**A ):
_UpperCAmelCase : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=A ) )
@require_fsdp
@require_multi_gpu
@slow
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def _A ( self : List[Any] ):
super().setUp()
_UpperCAmelCase : Optional[int] = 0.82
_UpperCAmelCase : int = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
_UpperCAmelCase : Tuple = {
"multi_gpu_fp16": 3200,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
"fsdp_full_shard_transformer_based_wrap_fp16": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_UpperCAmelCase : Tuple = 160
_UpperCAmelCase : Any = 160
_UpperCAmelCase : List[str] = inspect.getfile(accelerate.test_utils )
_UpperCAmelCase : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = os.path.join(self.test_scripts_folder , "test_performance.py" )
_UpperCAmelCase : Optional[int] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
_UpperCAmelCase : Tuple = cmd.copy()
for i, strategy in enumerate(A ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
def _A ( self : List[Any] ):
_UpperCAmelCase : Dict = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
_UpperCAmelCase : Union[str, Any] = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(A ):
_UpperCAmelCase : Optional[Any] = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_UpperCAmelCase : Optional[Any] = len(A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_UpperCAmelCase : Optional[Any] = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
_UpperCAmelCase : Optional[int] = cmd_config[:-1]
_UpperCAmelCase : List[str] = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
def _A ( self : List[Any] ):
_UpperCAmelCase : str = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
_UpperCAmelCase : Tuple = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_UpperCAmelCase : str = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
for i, strategy in enumerate(A ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A , env=os.environ.copy() )
| 31
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
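# Lightweight container holding the hyper-parameters used to build an
# ImageGPTImageProcessor in the tests below.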
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=18, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, ) -> Union[str, Any]:
UpperCamelCase : str = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase : int = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : int = num_channels
UpperCamelCase : Any = image_size
UpperCamelCase : Optional[int] = min_resolution
UpperCamelCase : Optional[Any] = max_resolution
UpperCamelCase : Union[str, Any] = do_resize
UpperCamelCase : List[Any] = size
UpperCamelCase : int = do_normalize
def snake_case_ ( self ) -> Tuple:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = ImageGPTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> int:
UpperCamelCase : str = ImageGPTImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'clusters' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase : int = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, obj[key] ) )
else:
self.assertEqual(obj[key], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : List[str] = os.path.join(SCREAMING_SNAKE_CASE_, 'image_processor.json' )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE_ ).to_dict()
UpperCamelCase : List[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict()
UpperCamelCase : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], SCREAMING_SNAKE_CASE_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def snake_case_ ( self ) -> str:
pass
def UpperCamelCase ( ) -> int:
UpperCamelCase : Optional[int] = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
UpperCamelCase : int = Image.open(dataset[4]['file'] )
UpperCamelCase : Optional[Any] = Image.open(dataset[5]['file'] )
UpperCamelCase : str = [imagea, imagea]
return images
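# Slow integration test: the pretrained ImageGPT processor should map images
# to the expected colour-cluster token ids, for single images and batches.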
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> str:
UpperCamelCase : List[str] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
UpperCamelCase : List[str] = prepare_images()
# test non-batched
UpperCamelCase : int = image_processing(images[0], return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (1, 1024) )
UpperCamelCase : Union[str, Any] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist(), SCREAMING_SNAKE_CASE_ )
# test batched
UpperCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE_, return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (2, 1024) )
UpperCamelCase : Optional[Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist(), SCREAMING_SNAKE_CASE_ )
| 119
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowerCamelCase = logging.get_logger(__name__)
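# Feature extractor turning raw mono waveforms into log-mel filterbank
# features (optionally mean/variance normalised) padded into a batch.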
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Optional[int] = ['input_features', 'attention_mask']
def __init__( self ,__UpperCAmelCase=80 ,__UpperCAmelCase=1_60_00 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=10 ,__UpperCAmelCase=25 ,__UpperCAmelCase="hamming_window" ,__UpperCAmelCase=3_2_7_6_8.0 ,__UpperCAmelCase=0.9_7 ,__UpperCAmelCase=1.0 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,**__UpperCAmelCase ,) -> Optional[Any]:
super().__init__(feature_size=__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,padding_value=__UpperCAmelCase ,**__UpperCAmelCase )
A__ = feature_size
A__ = sampling_rate
A__ = padding_value
A__ = hop_length
A__ = win_length
A__ = frame_signal_scale
A__ = preemphasis_coeff
A__ = mel_floor
A__ = normalize_means
A__ = normalize_vars
A__ = win_function
A__ = return_attention_mask
A__ = win_length * sampling_rate // 10_00
A__ = hop_length * sampling_rate // 10_00
A__ = optimal_fft_length(self.sample_size )
A__ = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform) -> np.ndarray:
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size,
            hop_length=self.sample_stride, fft_length=self.n_fft, center=False,
            preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log', )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
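# A minimal usage sketch (illustrative; assumes the extractor defaults above):
#
#     extractor = MCTCTFeatureExtractor()
#     audio = np.random.randn(16_000).astype(np.float32)   # one second of audio at 16 kHz
#     batch = extractor(audio, sampling_rate=16_000, padding=True, return_tensors="np")
#     print(batch["input_features"][0].shape)              # roughly (98, 80): (num_frames, feature_size)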
| 154
|
"""simple docstring"""
import os
def solution():
    """simple docstring"""
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
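# For clarity, the same four-direction scan written once with a window parameter
# (a sketch for illustration; the real solution above reads the 20x20 grid from grid.txt):
#
#     import math
#     def max_adjacent_product(grid, k=4):
#         n, best = len(grid), 0
#         for i in range(n):
#             for j in range(n):
#                 if j + k <= n:
#                     best = max(best, math.prod(grid[i][j + d] for d in range(k)))      # right
#                 if i + k <= n:
#                     best = max(best, math.prod(grid[i + d][j] for d in range(k)))      # down
#                 if i + k <= n and j + k <= n:
#                     best = max(best, math.prod(grid[i + d][j + d] for d in range(k)))  # diagonal down-right
#                 if i + k <= n and j - k + 1 >= 0:
#                     best = max(best, math.prod(grid[i + d][j - d] for d in range(k)))  # diagonal down-left
#         return best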
| 154
| 1
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__magic_name__: List[str] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : bool = field(default=_lowerCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowercase__ : bool = field(
default=_lowerCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowercase__ : Optional[int] = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowercase__ : Optional[int] = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=_lowerCAmelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
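# A usage sketch (assumes this dataclass mirrors transformers' Seq2SeqTrainingArguments):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )
#     print(args.to_dict()["generation_max_length"])  # 128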
| 342
|
import re
def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(R"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
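# A few illustrative cases for the pattern above:
#
#     indian_phone_validator("+918827897895")  # True  (+91 prefix, 10 digits starting with 8)
#     indian_phone_validator("9876543210")     # True  (bare 10-digit number starting with 9)
#     indian_phone_validator("123456789")      # False (too short, leading digit not in 7-9)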
| 342
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig(PretrainedConfig):
    model_type = 'audio-spectrogram-transformer'
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
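# A quick usage sketch:
#
#     config = ASTConfig(num_mel_bins=128, max_length=1024)
#     print(config.hidden_size, config.num_hidden_layers)  # 768 12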
| 173
|
import os
import pytest
from attr import dataclass
__a = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
@property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self):
        return f"""{self.framework}-transformers-test"""
@property
    def test_path(self):
return f"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''')
def sm_env(request):
    """simple docstring"""
    test_env = SageMakerTestEnvironment(framework=request.cls.framework)
    request.cls.env = test_env
| 173
| 1
|
def _print_dist(dist, v):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf'''):
                print(int(dist[i][j]), end='''\t''')
            else:
                print('''INF''', end='''\t''')
        print()
def floyd_warshall(graph, v):
    dist = [[float('''inf''') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''')
                    and dist[k][j] != float('''inf''')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
a_ = int(input("""Enter number of vertices: """))
a_ = int(input("""Enter number of edges: """))
a_ = [[float("""inf""") for i in range(v)] for j in range(v)]
for i in range(v):
a_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("""\nEdge """, i + 1)
a_ = int(input("""Enter source:"""))
a_ = int(input("""Enter destination:"""))
a_ = float(input("""Enter weight:"""))
a_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
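# A non-interactive sketch of the same routine (hypothetical inputs, bypassing input()):
#
#     INF = float("inf")
#     demo_graph = [[0.0, 2.0, INF], [INF, 0.0, 1.0], [4.0, INF, 0.0]]
#     floyd_warshall(demo_graph, 3)  # prints the 3x3 shortest-path matrix, e.g. dist[0][2] == 3 via vertex 1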
| 330
|
import operator as op
a__ = '''scaler.pt'''
a__ = '''pytorch_model'''
a__ = '''random_states'''
a__ = '''optimizer'''
a__ = '''scheduler'''
a__ = '''pytorch_model.bin'''
a__ = '''pytorch_model.bin.index.json'''
a__ = '''model.safetensors'''
a__ = '''model.safetensors.index.json'''
a__ = '''1.10.2'''
a__ = '''py38'''
a__ = '''4.17.0'''
a__ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
a__ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
a__ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
a__ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
a__ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
a__ = '''2.0.1'''
a__ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
a__ = ['''default''', '''reduce-overhead''', '''max-autotune''']
a__ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
a__ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
a__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 235
| 0
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
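# Illustrative behaviour:
#
#     capitalize("hello world")  # 'Hello world'
#     capitalize("123 main st")  # '123 main st' (non-letters are left untouched)
#     capitalize("")             # ''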
| 358
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
            tie_weights_=True, )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 342
| 0
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
        sas_model.load_state_dict(save_dict['''model'''])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''')['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''',
            shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''', name='''LFQA_reddit''')
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='''english_wiki40b_snippets_100w''', n_results=n_results, )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question, support_doc)
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE__ : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer, sas_model, question_doc, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device='''cuda:0''', )[0]
    return (answer, support_list)
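# End-to-end flow of the pieces above (a sketch; only runs with the indexes and models loaded):
#
#     question_doc, support_list = make_support("Why is the sky blue?", source="wiki40b", method="dense")
#     answer, _ = answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256)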
st.title("Long Form Question Answering with ELI5")
# Start sidebar
snake_case_ : int = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
snake_case_ : Tuple = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case_ : Dict = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case_ : Any = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
snake_case_ : Optional[Any] = st.sidebar.checkbox("Demo options")
if demo_options:
snake_case_ : List[str] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
snake_case_ : Optional[Any] = action_list.index(action_st)
snake_case_ : Dict = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
snake_case_ : str = show_type == "Show full text of passages"
else:
snake_case_ : str = 3
snake_case_ : str = True
snake_case_ : Optional[Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
snake_case_ : Any = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
snake_case_ : List[str] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
snake_case_ : Tuple = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
snake_case_ : List[Any] = "wiki40b"
snake_case_ : str = "dense"
snake_case_ : Tuple = "beam"
snake_case_ : Dict = 2
snake_case_ : str = 64
snake_case_ : str = 2_56
snake_case_ : int = None
snake_case_ : str = None
snake_case_ : List[str] = st.sidebar.checkbox("Generation options")
if generate_options:
snake_case_ : Union[str, Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
snake_case_ : Optional[Any] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
snake_case_ : Union[str, Any] = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
snake_case_ : Dict = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
snake_case_ : List[Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case_ : str = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
snake_case_ : Optional[int] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
snake_case_ : Optional[int] = None
# start main text
snake_case_ : int = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
snake_case_ : Union[str, Any] = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case_ : Optional[Any] = st.text_input("Enter your question here:", "")
else:
snake_case_ : Any = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case_ ,snake_case_ : List[str] = make_support(question, source=wiki_source, method="dense", n_results=10)
snake_case_ ,snake_case_ : int = make_support(question, source=wiki_source, method="sparse", n_results=10)
snake_case_ : List[str] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case_ : Optional[Any] = support_list[:10]
snake_case_ : List[str] = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
snake_case_ ,snake_case_ : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case_ ,snake_case_ : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
snake_case_ : int = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
snake_case_ : Union[str, Any] = res[1].strip()
if sec_titles == "":
snake_case_ : Optional[int] = "[{}]({})".format(res[0], wiki_url)
else:
snake_case_ : List[Any] = sec_titles.split(" & ")
snake_case_ : Union[str, Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
snake_case_ : Optional[int] = find_nearest_training(question)
snake_case_ : List[Any] = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
snake_case_ : List[str] = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
snake_case_ : Dict = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 125
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string'''), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True)
        def tokenize(examples):
            return tokenizer(examples['''text'''])
        times['''map identity'''] = map(dataset)
        times['''map identity batched'''] = map(dataset, batched=True)
        times['''map no-op batched'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''numpy'''):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''pandas'''):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''torch''', columns='''numbers'''):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers'''):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x: None, batched=True)
        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True)
        times['''filter'''] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 125
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_nllb'''] = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_nllb_fast'''] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
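# With this lazy-module pattern, importing the package is cheap; the heavy tokenizer
# modules are only imported when an attribute is first accessed, e.g. (a sketch):
#
#     from transformers.models.nllb import NllbTokenizer   # triggers the real import here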
| 370
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)
    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                F'determinant modular {req_l} of encryption key({det}) '
                F'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = """""".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = """""".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    '''simple docstring'''
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
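# A non-interactive round-trip sketch (hypothetical 2x2 key; det = 2*6 - 5*1 = 7 is coprime with 36):
#
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     ct = hc.encrypt("HELLO")   # "HELLO" is padded to "HELLOO" to fit the 2x2 key
#     hc.decrypt(ct)             # 'HELLOO'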
| 29
| 0
|
import random
def rabin_miller(num: int) -> bool:
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
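# Illustrative checks (5 Miller-Rabin rounds give a false-positive chance of at most 4**-5):
#
#     is_prime_low_num(97)    # True  -- direct hit in the low-primes table
#     is_prime_low_num(7919)  # True  -- passes the table, then rabin_miller decides
#     is_prime_low_num(7917)  # False -- divisible by 3, caught by trial division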
| 154
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if len(scores) == 0:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("""Optimal value : """, end="""""")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
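# For the hard-coded leaves above the tree has height log2(8) = 3, and main() prints
# "Optimal value : 65": the maximiser level keeps {90, 33, 65, 34423}, the minimiser
# level reduces those to {33, 65}, and the root maximiser picks 65.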
| 154
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Dict = logging.get_logger(__name__)
_a : Tuple = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = """deformable_detr"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
'''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
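# A quick usage sketch:
#
#     config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#     config.hidden_size  # 256, resolved to `d_model` via `attribute_map`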
| 354
|
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_a : List[Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        '''simple docstring'''
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        '''simple docstring'''
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        '''simple docstring'''
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        '''simple docstring'''
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        revision=None,
        force_download=False,
        use_auth_token=None,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        '''simple docstring'''
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["""model_save_dir"""] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["""model_save_dir"""] = Path(model_cache_path).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        '''simple docstring'''
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download,
            use_auth_token=use_auth_token, **model_kwargs, )
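# Typical usage (a sketch; the model id and input shape are placeholders):
#
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
#     outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))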
| 46
| 0
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = """examples/"""
REPLACE_PATTERNS = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
README_FILE = """README.md"""
def update_version_in_file(fname, version, pattern):
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""", """https://huggingface.co/docs/transformers/model_doc""", )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def __magic_name__ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
SCREAMING_SNAKE_CASE_: Union[str, Any] =f.read()
SCREAMING_SNAKE_CASE_: Optional[int] =REPLACE_PATTERNS["""init"""][0].search(lowercase ).groups()[0]
return packaging.version.parse(lowercase )
def __magic_name__ ( lowercase=False ):
SCREAMING_SNAKE_CASE_: int =get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_: List[str] =default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_: str =f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
SCREAMING_SNAKE_CASE_: int =f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_: Any =input(f'''Which version are you releasing? [{default_version}]''' )
if len(lowercase ) == 0:
SCREAMING_SNAKE_CASE_: Optional[Any] =default_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase , patch=lowercase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[str] =get_version()
SCREAMING_SNAKE_CASE_: str =f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
SCREAMING_SNAKE_CASE_: Optional[Any] =current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_: str =input(f'''Which version are we developing now? [{dev_version}]''' )
if len(lowercase ) == 0:
SCREAMING_SNAKE_CASE_: Any =dev_version
print(f'''Updating version to {version}.''' )
global_version_update(lowercase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
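# How one of the patterns behaves on a made-up line: the capture group keeps
# the original indentation while only the version string is swapped out.
#
#     >>> REPLACE_PATTERNS["setup"][0].sub(r'\1version="4.29.0",', '    version="4.28.0.dev0",')
#     '    version="4.29.0",'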
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights to our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
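# Example invocation (hypothetical script/output names; downloads the timm
# weights, so it needs network access):
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base-distilled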
def max_product_subarray(numbers: list[int]) -> int:
    """
    Returns the maximum product that can be obtained by multiplying a
    contiguous subarray of the given integer list `numbers`.
    """
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
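# A couple of quick checks (hypothetical inputs). In the second list the two
# negatives multiply back into a positive, so the whole array wins:
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product_subarray([-2, -3, 4]))  # 24, from the whole array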
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
from __future__ import annotations

from random import choice


def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of lst. Elements are assumed distinct,
    since the partition below drops duplicates of the pivot.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
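# Worked example (hypothetical data, distinct values): sorted, the list is
# [1, 2, 3, 4, 5, 6, 9], so the 3rd smallest element is 3 no matter which
# pivots get picked along the way:
if __name__ == "__main__":
    print(kth_number([3, 1, 4, 5, 9, 2, 6], k=3))  # 3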
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding
    the whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
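# Quick illustration (hypothetical symbols; set order may vary): these pairs
# are what the merge loop in `bpe()` below repeatedly fuses, lowest-rank first:
#
#     >>> get_pairs(("l", "o", "w", "er"))
#     {('l', 'o'), ('o', 'w'), ('w', 'er')}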
class BartTokenizer(PreTrainedTokenizer):
    """Constructs a BART tokenizer, which uses byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
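# Layout produced by `build_inputs_with_special_tokens` above (schematic; a
# real tokenizer instance needs vocab/merges files):
#
#     single sequence:   <s> A </s>
#     pair of sequences: <s> A </s></s> B </s>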
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
lowerCAmelCase_ : str = np.array(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = (1, 2, 1)
lowerCAmelCase_ : str = (1, 1, 0, 7)
lowerCAmelCase_ : List[Any] = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = model.fit(disp=__UpperCamelCase , maxiter=600 , method="nm" )
lowerCAmelCase_ : Optional[Any] = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : int = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Dict = regressor.predict(__UpperCamelCase )
return y_pred[0]
def __lowerCamelCase ( __UpperCamelCase ) -> float:
"""simple docstring"""
train_user.sort()
lowerCAmelCase_ : Optional[Any] = np.percentile(__UpperCamelCase , 25 )
lowerCAmelCase_ : List[Any] = np.percentile(__UpperCamelCase , 75 )
lowerCAmelCase_ : Union[str, Any] = qa - qa
lowerCAmelCase_ : List[Any] = qa - (iqr * 0.1)
return low_lim
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Union[str, Any] = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase_ : Tuple = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase__ = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
lowercase__ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowercase__ = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase__ = normalize_df[:, 2].tolist()
lowercase__ = normalize_df[:, 0].tolist()
lowercase__ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase__ = normalize_df[:, [1, 2]].tolist()
lowercase__ = x[: len(x) - 1]
lowercase__ = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase__ = total_date[: len(total_date) - 1]
lowercase__ = total_user[: len(total_user) - 1]
lowercase__ = total_match[: len(total_match) - 1]
lowercase__ = total_date[len(total_date) - 1 :]
lowercase__ = total_user[len(total_user) - 1 :]
lowercase__ = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase__ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase__ = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
"""simple docstring"""
lowerCAmelCase__ = range(2, 20 + 1)
lowerCAmelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase__ = {}
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase : int = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase , lowerCAmelCase : List[Any] = 0, 0
lowerCAmelCase : int = n - i
lowerCAmelCase : Optional[Any] = memo.get(SCREAMING_SNAKE_CASE )
if sub_memo is not None:
lowerCAmelCase : Dict = sub_memo.get(SCREAMING_SNAKE_CASE )
if jumps is not None and len(SCREAMING_SNAKE_CASE ) > 0:
# find and make the largest jump without going over
lowerCAmelCase : int = -1
for _k in range(len(SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase : str = _k
break
if max_jump >= 0:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase : Optional[int] = diff + c
for j in range(min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
lowerCAmelCase , lowerCAmelCase : Any = divmod(SCREAMING_SNAKE_CASE , 1_0 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Dict = []
else:
lowerCAmelCase : Union[str, Any] = {c: []}
lowerCAmelCase : List[Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase , lowerCAmelCase : Any = next_term(SCREAMING_SNAKE_CASE , k - 1 , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase , lowerCAmelCase : Dict = compute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , i + dn , SCREAMING_SNAKE_CASE )
diff += _diff
dn += terms_jumped
lowerCAmelCase : Any = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase : Optional[Any] = 0
while j < len(SCREAMING_SNAKE_CASE ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE , (diff, dn, k) )
return (diff, dn)
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase : int = i
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase : Dict = ds_c + ds_b
diff += addend
lowerCAmelCase : str = 0
for j in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = a_i[j] + addend
lowerCAmelCase , lowerCAmelCase : Any = divmod(SCREAMING_SNAKE_CASE , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return diff, i - start_i
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
for j in range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase : Optional[int] = digits[j] + addend
if s >= 1_0:
lowerCAmelCase , lowerCAmelCase : Dict = divmod(SCREAMING_SNAKE_CASE , 1_0 )
lowerCAmelCase : str = addend // 1_0 + quotient
else:
lowerCAmelCase : List[Any] = s
lowerCAmelCase : Dict = addend // 1_0
if addend == 0:
break
while addend > 0:
lowerCAmelCase , lowerCAmelCase : List[str] = divmod(SCREAMING_SNAKE_CASE , 1_0 )
digits.append(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0**1_5 ):
'''simple docstring'''
lowerCAmelCase : Any = [1]
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Optional[Any] = 0
while True:
lowerCAmelCase , lowerCAmelCase : Any = next_term(SCREAMING_SNAKE_CASE , 2_0 , i + dn , SCREAMING_SNAKE_CASE )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase : Any = 0
for j in range(len(SCREAMING_SNAKE_CASE ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }")
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Finds the amount of triangular words in words.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
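# Worked example from the problem statement: "SKY" scores 19 + 11 + 25 = 55,
# the 10th triangular number, so it is a triangular word:
#
#     >>> sum(ord(x) - 64 for x in "SKY")
#     55
#     >>> 55 in TRIANGULAR_NUMBERS
#     True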
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` becomes 0 in [a, b] using bolzano's theorem."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
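# Quick sanity check (hypothetical tolerances): the only real root of
# x**3 - 2*x - 5 is about 2.0945515, and the bisection above converges to it:
#
#     root = bisection(f, 1, 1000)
#     assert abs(root - 2.0945515) < 1e-5
#     assert abs(f(root)) < 1e-4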
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
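# Sketch of what the `_LazyModule` indirection buys (hypothetical session):
# importing the package is cheap, and `modeling_nezha` (and therefore torch)
# is only imported when one of its attributes is first accessed:
#
#     from transformers.models import nezha
#     model_cls = nezha.NezhaForMaskedLM  # the real import happens here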
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : List[Any] = TFDistilBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
A_ : Dict = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = self.num_labels
A_ : Optional[int] = TFDistilBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
A_ : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
A_ : str = self.num_choices
A_ : Tuple = TFDistilBertForMultipleChoice(_SCREAMING_SNAKE_CASE )
A_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A_ : Tuple = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A_ : Any = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
A_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
A_ : Dict = self.num_labels
A_ : Any = TFDistilBertForTokenClassification(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
A_ : Tuple = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : Tuple = config_and_inputs
A_ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
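# The tester/test split above is the standard transformers testing pattern: the
# tester fabricates a tiny random config plus random inputs, and every check
# asserts only on output *shapes*, never on values (except in the slow
# integration test). A minimal sketch of the input helpers it relies on
# (simplified versions of the imported test utilities, not the real ones):
#
#   import tensorflow as tf
#
#   def ids_tensor(shape, vocab_size):
#       # random integer ids in [0, vocab_size)
#       return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)
#
#   def random_attention_mask(shape):
#       mask = tf.random.uniform(shape, maxval=2, dtype=tf.int32)
#       # make sure at least one token per row is attended to
#       return tf.concat([mask[:, :-1], tf.ones_like(mask[:, -1:])], axis=-1)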
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
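# The test above exercises the example script end-to-end by spawning a real
# subprocess, so CLI argument parsing and the training loop run exactly as they
# would from a shell. The launch pattern is reusable on its own (the arguments
# below are illustrative, not the full set required by finetune_rag.py):
#
#   import sys
#   from pathlib import Path
#
#   testargs = "--do_train --num_train_epochs 1".split()
#   cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
#   execute_subprocess_async(cmd, env=self.get_env())  # raises if the script fails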
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Checks primality with the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yields the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"""{solution() = }""")
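# Quick sanity checks for the helpers above; both values are easy to verify by
# hand (2, 3, 5, 7, 11, 13 are the first six primes):
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert solution(6) == 13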
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
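# A config class like the one above round-trips through the standard
# PretrainedConfig API; the directory name here is arbitrary:
#
#   config = GLPNConfig(num_channels=3, decoder_hidden_size=64)
#   config.save_pretrained("./glpn-config")
#   reloaded = GLPNConfig.from_pretrained("./glpn-config")
#   assert reloaded.decoder_hidden_size == 64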
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
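# The default embedding size rule above, min(50, (cardinality + 1) // 2), grows
# with the number of categories but caps at 50:
for cat in (4, 10, 99, 200):
    assert min(50, (cat + 1) // 2) == {4: 2, 10: 5, 99: 50, 200: 50}[cat]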
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
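# With the default extra_ids=100, the tokenizer above exposes the sentinel
# tokens <extra_id_0> ... <extra_id_99> used by T5's span-corruption objective.
# Typical usage (requires downloading the t5-small checkpoint):
#
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   assert "<extra_id_0>" in tok.get_sentinel_tokens()
#   ids = tok("The <extra_id_0> walks in <extra_id_1> park")["input_ids"]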
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Returns the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each query vector, finds the nearest dataset vector by Euclidean distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Returns the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
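# Tiny hand-checkable demo of the helpers above; the query (0.9, 1.1) is
# clearly closest to (1.0, 1.0):
demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
demo_queries = np.array([[0.9, 1.1]])
print(similarity_search(demo_dataset, demo_queries))  # [[[1.0, 1.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0])))  # ~0.7071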
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
def __init__( self :Any , _A :int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
__A = config.output_attentions
__A = config.output_hidden_states
__A = nn.ModuleList([BertLayer(_A ) for _ in range(config.num_hidden_layers )] )
__A = nn.ModuleList([BertHighway(_A ) for _ in range(config.num_hidden_layers )] )
__A = [-1 for _ in range(config.num_hidden_layers )]
def lowercase_ ( self :Any , _A :List[Any] ) -> Tuple:
'''simple docstring'''
if (type(_A ) is float) or (type(_A ) is int):
for i in range(len(self.early_exit_entropy ) ):
__A = x
else:
__A = x
def lowercase_ ( self :Optional[Any] , _A :List[str] ) -> Dict:
'''simple docstring'''
__A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowercase_ ( self :List[Any] , _A :Tuple , _A :Tuple=None , _A :int=None , _A :List[Any]=None , _A :str=None , ) -> Tuple:
'''simple docstring'''
__A = ()
__A = ()
__A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = layer_module(
_A , _A , head_mask[i] , _A , _A )
__A = layer_outputs[0]
if self.output_attentions:
__A = all_attentions + (layer_outputs[1],)
__A = (hidden_states,)
if self.output_hidden_states:
__A = current_outputs + (all_hidden_states,)
if self.output_attentions:
__A = current_outputs + (all_attentions,)
__A = self.highway[i](_A )
# logits, pooled_output
if not self.training:
__A = highway_exit[0]
__A = entropy(_A )
__A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_A , i + 1 )
else:
__A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__A = all_hidden_states + (hidden_states,)
__A = (hidden_states,)
if self.output_hidden_states:
__A = outputs + (all_hidden_states,)
if self.output_attentions:
__A = outputs + (all_attentions,)
__A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self :Tuple , _A :List[str] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config
__A = BertEmbeddings(_A )
__A = DeeBertEncoder(_A )
__A = BertPooler(_A )
self.init_weights()
def lowercase_ ( self :Union[str, Any] ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowercase_ ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowercase_ ( self :Tuple , _A :Tuple ) -> Union[str, Any]:
'''simple docstring'''
__A = value
def lowercase_ ( self :int , _A :int ) -> Tuple:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_A )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def lowercase_ ( self :Tuple , _A :int=None , _A :List[Any]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Optional[int]=None , _A :Any=None , _A :List[str]=None , _A :Optional[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__A = input_ids.size()
elif inputs_embeds is not None:
__A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__A = torch.ones(_A , device=_A )
if encoder_attention_mask is None:
__A = torch.ones(_A , device=_A )
if token_type_ids is None:
__A = torch.zeros(_A , dtype=torch.long , device=_A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__A = self.get_extended_attention_mask(_A , _A , _A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__A = encoder_attention_mask[:, None, None, :]
__A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__A = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__A = self.get_head_mask(_A , self.config.num_hidden_layers )
__A = self.embeddings(
input_ids=_A , position_ids=_A , token_type_ids=_A , inputs_embeds=_A )
__A = self.encoder(
_A , attention_mask=_A , head_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
__A = encoder_outputs[0]
__A = self.pooler(_A )
__A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
def __init__( self :Any , _A :Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
__A = BertPooler(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , config.num_labels )
def lowercase_ ( self :List[Any] , _A :Optional[Any] ) -> int:
'''simple docstring'''
__A = encoder_outputs[0]
__A = self.pooler(_A )
# "return" pooler_output
# BertModel
__A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__A = bmodel_output[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self :str , _A :Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(_A )
__A = config.num_labels
__A = config.num_hidden_layers
__A = DeeBertModel(_A )
__A = nn.Dropout(config.hidden_dropout_prob )
__A = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def lowercase_ ( self :Tuple , _A :str=None , _A :Optional[int]=None , _A :Any=None , _A :str=None , _A :int=None , _A :Tuple=None , _A :Any=None , _A :List[str]=-1 , _A :Optional[Any]=False , ) -> List[str]:
'''simple docstring'''
__A = self.num_layers
try:
__A = self.bert(
_A , attention_mask=_A , token_type_ids=_A , position_ids=_A , head_mask=_A , inputs_embeds=_A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__A = outputs[1]
__A = self.dropout(_A )
__A = self.classifier(_A )
__A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__A = e.message
__A = e.exit_layer
__A = outputs[0]
if not self.training:
__A = entropy(_A )
__A = []
__A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__A = []
for highway_exit in outputs[-1]:
__A = highway_exit[0]
if not self.training:
highway_logits_all.append(_A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__A = MSELoss()
__A = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__A = CrossEntropyLoss()
__A = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_A )
if train_highway:
__A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__A = (loss,) + outputs
if not self.training:
__A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
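# The early-exit rule above leaves the network as soon as an intermediate
# classifier is confident: entropy(logits) is compared against a per-layer
# threshold. Note that log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)) is
# exactly the Shannon entropy of softmax(x), so an equivalent standalone form
# of the decision (a sketch, not part of the original module) is:
def should_exit(logits: torch.Tensor, threshold: float) -> bool:
    # equivalent reformulation of entropy() above via softmax probabilities
    probs = torch.softmax(logits, dim=-1)
    shannon = -(probs * torch.log(probs)).sum(dim=-1)
    return bool((shannon < threshold).all())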
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
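# Outside the test harness the same pipeline is built in one line; the
# checkpoint and video file below are illustrative, not taken from this test:
#
#   classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   classifier("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]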
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validates that the card number starts with a valid issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Runs the Luhn mod-10 checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validates a credit card number: characters, length, prefix, and Luhn check."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
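# The classic Luhn test number 79927398713 is valid, and flipping its check
# digit breaks the mod-10 property:
assert luhn_validation("79927398713")
assert not luhn_validation("79927398714")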
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
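# Example run; radix_sort sorts in place and also returns the list:
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]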
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
"""simple docstring"""
import os
def solution() -> int:
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
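# Worked example of the scoring rule from the problem statement: COLIN has
# alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name in the
# sorted list it would score 938 * 53 = 49714.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53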
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def a_ ( self) -> Any:
torch.manual_seed(0)
        snake_case_ = UNet2DConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
        snake_case_ = Image.fromarray(np.uint8(lowerCAmelCase__)).convert('RGB').resize((64, 64))
        snake_case_ = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase__, torch_dtype=torch.float16, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
            lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.float16, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
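# The last test keeps peak VRAM under 2.65 GB by combining fp16 weights,
# attention slicing, and sequential CPU offload. The measurement pattern is
# reusable on its own:
#
#   torch.cuda.reset_peak_memory_stats()
#   ...  # run the pipeline
#   assert torch.cuda.max_memory_allocated() < 2.65 * 10**9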
| 312
| 0
|
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir: str):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__a , env=self.get_env() )
UpperCAmelCase_ = os.path.join(__a , "metrics.json" )
with open(__a ) as f:
UpperCAmelCase_ = json.load(__a )
return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
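    # Added note: `_create_dummy_data` lays out one plain-text line per example,
    # with sources and targets aligned by line number:
    #   data/train.source (12 x "What is love ?")   data/train.target (12 x "life")
    #   data/val.source   (2 lines)                 data/val.target   (2 lines)
    #   data/test.source  (2 lines)                 data/test.target  (2 lines)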
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
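# Usage sketch (added; the checkpoint name and file path are illustrative
# assumptions, not taken from this module):
#   from transformers import Pix2StructProcessor
#   from PIL import Image
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=Image.open("chart.png"), text="Describe the chart", return_tensors="pt")
# The result carries the image features plus the renamed text fields
# `decoder_input_ids` / `decoder_attention_mask` from the pop() calls above.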
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(F'ct\' = {four_vector[0]}')
print(F'x\' = {four_vector[1]}')
print(F'y\' = {four_vector[2]}')
print(F'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'\n{numerical_vector}')
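    # Quick numeric check (added sketch, not in the original module): at half
    # the speed of light, beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) ≈ 1.1547.
    print(f"beta(0.5c)  = {beta(0.5 * c)}")   # 0.5
    print(f"gamma(0.5c) = {gamma(0.5 * c)}")  # 1.1547005383792515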
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
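# Usage sketch (added; repo and file names are illustrative):
#   hf_hub_url("user/my-dataset", "data/train.json", revision="main")
#   -> "https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.json"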
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
    html_string_2 = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
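    # Standalone sketch (added) of what the extractor returns for a tiny page
    # (requires bs4; expected values mirror the assertions above):
    #   fe = MarkupLMFeatureExtractor()
    #   enc = fe("<html><body><h1>Hello</h1></body></html>")
    #   enc.nodes  -> [['Hello']]
    #   enc.xpaths -> [['/html/body/h1']]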
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector,
            atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
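    # Added note: with the initial parameter_vector = [2, 4, 1, 5], the first
    # training input (5, 2, 3) gives hypothesis value 2 + 4*5 + 1*2 + 5*3 = 39
    # against a target output of 15; gradient descent shrinks that gap.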
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ], )
    @require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)

        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}], )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ], )

    @require_tf
    def test_small_model_tf(self):
        pass
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
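# Added note: assigning a `_LazyModule` into `sys.modules` defers the heavy
# torch/TF imports above until an attribute is first accessed, so importing
# the package stays fast even when optional backends are installed.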
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size=None, length=None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings, ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
@torch.no_grad()
    def __call__(self, prompt, num_inference_steps=100, guidance_scale=5.0, truncation_rate=1.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive).")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
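# Toy illustration (added) of the truncation rule above on a plain 2-D
# probability vector: keep the smallest set of highest-probability entries
# whose cumulative mass stays below the rate, then scatter the mask back to
# the original order.
if __name__ == "__main__":
    log_p = torch.log(torch.tensor([[0.05, 0.6, 0.3, 0.05]]))
    sorted_log_p, idx = torch.sort(log_p, 1, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(dim=1) < 0.9
    keep = torch.cat((torch.ones_like(keep[:, :1]), keep), dim=1)[:, :-1]
    keep = keep.gather(1, idx.argsort(1))
    print(keep)  # tensor([[False,  True,  True, False]])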
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
lowercase : Union[str, Any] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowercase : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
lowercase : str = load_original_entity_vocab(SCREAMING_SNAKE_CASE__ )
# add an entry for [MASK2]
lowercase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase : Dict = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase : List[Any] = AddedToken("""<ent>""" , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
lowercase : int = AddedToken("""<ent2>""" , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """tokenizer_config.json""" ) , """r""" ) as f:
lowercase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
lowercase : Dict = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
lowercase : Dict = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
lowercase : int = state_dict["""embeddings.word_embeddings.weight"""]
lowercase : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
lowercase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
lowercase : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase : List[Any] = state_dict[bias_name]
lowercase : Any = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase : int = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase : Union[str, Any] = f"encoder.layer.{layer_index}.attention.self."
lowercase : List[str] = state_dict[prefix + matrix_name]
lowercase : Any = state_dict[prefix + matrix_name]
lowercase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowercase : Tuple = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase : Optional[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase : Optional[Any] = state_dict["""entity_predictions.bias"""]
lowercase : str = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase : List[str] = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase : List[str] = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
lowercase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
lowercase : List[Any] = state_dict[key]
else:
lowercase : Union[str, Any] = state_dict[key]
lowercase , lowercase : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if set(SCREAMING_SNAKE_CASE__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(SCREAMING_SNAKE_CASE__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase : str = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task="""entity_classification""" )
lowercase : str = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
lowercase : str = (0, 9)
lowercase : Dict = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors="""pt""" )
lowercase : Any = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase : List[Any] = torch.Size((1, 33, 768) )
lowercase : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase : Optional[int] = torch.Size((1, 1, 768) )
lowercase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase : Any = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = """Tokyo is the capital of <mask>."""
lowercase : List[Any] = (24, 30)
lowercase : int = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors="""pt""" )
lowercase : Dict = model(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = encoding["""input_ids"""][0].tolist()
lowercase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
lowercase : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
lowercase : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
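    # Example invocation (added; paths and the script file name are placeholders):
    #   python convert_mluke_checkpoint.py \
    #       --checkpoint_path pytorch_model.bin \
    #       --metadata_path metadata.json \
    #       --entity_vocab_path entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base \
    #       --model_size base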
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
lowercase__ : Dict = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowercase_ ):
lowercase__ : Any = Accelerator(cpu=lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
lowercase__ : Union[str, Any] = Accelerator()
lowercase__ : Optional[int] = GradientState()
assert state.num_steps == 1
lowercase__ : Dict = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowercase__ : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __UpperCamelCase ( self : List[str] ) -> Dict:
lowercase__ : Any = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = create_components()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Optional[Any] = accelerator.prepare(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __UpperCamelCase ( self : Dict ) -> Tuple:
lowercase__ : Dict = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = create_components()
accelerator.prepare(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __UpperCamelCase ( self : List[str] ) -> Dict:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowercase_ : Dict , **lowercase_ : List[Any] ):
pass
with patch("torch.cuda.set_device" , lowercase_ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
lowercase__ : Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
lowercase__ : str = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = create_components()
accelerator.prepare(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : int = get_signature(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
def __UpperCamelCase ( self : Tuple ) -> Dict:
lowercase__ : List[str] = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = create_components()
accelerator.prepare(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = get_signature(lowercase_ )
# saving hook
def save_config(lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
lowercase__ : Union[str, Any] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowercase_ , "data.json" ) , "w" ) as f:
json.dump(lowercase_ , lowercase_ )
# loading hook
def load_config(lowercase_ : Optional[int] , lowercase_ : str ):
with open(os.path.join(lowercase_ , "data.json" ) , "r" ) as f:
lowercase__ : int = json.load(lowercase_ )
lowercase__ : Any = config["class_name"]
lowercase__ : Optional[int] = accelerator.register_save_state_pre_hook(lowercase_ )
lowercase__ : Optional[Any] = accelerator.register_load_state_pre_hook(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match with hooks
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase__ : Tuple = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase_ )
# make sure random weights don't match with hooks removed
load_random_weights(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
lowercase__ : List[str] = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowercase_ )
self.assertTrue(abs(model_signature - get_signature(lowercase_ ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __UpperCamelCase ( self : Dict ) -> Dict:
lowercase__ : List[str] = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = create_components()
lowercase__ : Dict = None
# This should work
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertTrue(dummy_obj is None )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : List[Any] = Accelerator()
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = create_components()
lowercase__ : int = [1, 2, 3]
# This should work
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase_ , "_is_accelerate_prepared" , lowercase_ ) , lowercase_ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
lowercase__ : int = Accelerator()
# This should work
lowercase__ : List[str] = accelerator.prepare(lowercase_ )
@slow
@require_bnb
def __UpperCamelCase ( self : List[str] ) -> List[str]:
from transformers import AutoModelForCausalLM
lowercase__ : Tuple = Accelerator()
with init_empty_weights():
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowercase__ : Dict = infer_auto_device_map(lowercase_ )
lowercase__ : Optional[Any] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
# This should not work and get value error
with self.assertRaises(lowercase_ ):
lowercase__ : Union[str, Any] = accelerator.prepare(lowercase_ )
@slow
@require_bnb
@require_multi_gpu
def __UpperCamelCase ( self : Any ) -> List[str]:
from transformers import AutoModelForCausalLM
lowercase__ : int = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowercase__ : Union[str, Any] = infer_auto_device_map(lowercase_ )
lowercase__ : List[Any] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
lowercase__ : str = Accelerator()
# This should not work and get value error
with self.assertRaises(lowercase_ ):
lowercase__ : Tuple = accelerator.prepare(lowercase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowercase__ : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
lowercase__ : Any = infer_auto_device_map(lowercase_ )
lowercase__ : int = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
lowercase__ : str = Accelerator()
# This should work
lowercase__ : Optional[Any] = accelerator.prepare(lowercase_ )
@require_cuda
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        model = accelerator.prepare(model)
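# Minimal sketch (added) of the prepare/backward flow these tests exercise,
# using only public accelerate APIs and the helpers defined at the top of
# this file.
if __name__ == "__main__":
    accelerator = Accelerator()
    model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(*create_components())
    inputs = torch.randn(8, 2, device=accelerator.device)
    loss = model(inputs).sum()   # toy loss
    accelerator.backward(loss)   # handles grad scaling / distributed details
    optimizer.step()
    optimizer.zero_grad()
    scheduler.step()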
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
def __lowerCamelCase ( self : Optional[Any] ) ->Any:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCamelCase__ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
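# Hedged sketch (not part of the test class above) of the shape contract the
# three tests verify: a single image becomes a batch of one, a list of images
# becomes one tensor row per image, and both are cropped to crop_size.
batch_size, num_channels, crop = 7, 3, 18   # illustrative values only
single_shape = (1, num_channels, crop, crop)
batched_shape = (batch_size, num_channels, crop, crop)
assert single_shape[1:] == batched_shape[1:]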
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A : Tuple = logging.get_logger(__name__)
_A : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,lowerCAmelCase_ ):
_UpperCAmelCase : Dict = "nat"
_UpperCAmelCase : Tuple = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , A : Union[str, Any]=4 , A : str=3 , A : List[Any]=6_4 , A : Optional[Any]=[3, 4, 6, 5] , A : int=[2, 4, 8, 1_6] , A : Optional[int]=7 , A : List[Any]=3.0 , A : str=True , A : str=0.0 , A : Any=0.0 , A : int=0.1 , A : Tuple="gelu" , A : List[Any]=0.02 , A : str=1e-5 , A : Optional[int]=0.0 , A : Optional[Any]=None , A : Dict=None , **A : str , ) ->Union[str, Any]:
super().__init__(**A )
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : Any = embed_dim
lowerCamelCase__ : str = depths
lowerCamelCase__ : Union[str, Any] = len(A )
lowerCamelCase__ : int = num_heads
lowerCamelCase__ : Optional[int] = kernel_size
lowerCamelCase__ : Optional[int] = mlp_ratio
lowerCamelCase__ : List[Any] = qkv_bias
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = drop_path_rate
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = layer_norm_eps
lowerCamelCase__ : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCamelCase__ : str = int(embed_dim * 2 ** (len(A ) - 1) )
lowerCamelCase__ : Dict = layer_scale_init_value
lowerCamelCase__ : Dict = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(A ) + 1 )]
lowerCamelCase__ , lowerCamelCase__ : str = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
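# Worked example (a sketch, not from the original file): the hidden_size
# exposed to VisionEncoderDecoderModel doubles the embedding dim once per
# stage after the first, so with the defaults above:
embed_dim, depths = 64, [3, 4, 6, 5]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 512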
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ : List[str] = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : Union[str, Any] = 3
lowerCAmelCase_ : List[str] = (3_2, 3_2)
lowerCAmelCase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = torch.tensor([1_0] ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return (3, 3_2, 3_2)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return (3, 3_2, 3_2)
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : int = {
'block_out_channels': (3_2, 6_4),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 3_2,
}
lowerCAmelCase_ : List[str] = self.dummy_input
return init_dict, inputs_dict
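# Hedged usage sketch (assumes diffusers is installed and that the
# UNetaDModel imported above is the auto-generated alias of
# diffusers.UNet2DModel): building the tiny UNet described by the init dict
# above and running one denoising step on the dummy input shape.
import torch
from diffusers import UNet2DModel

tiny_unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
    block_out_channels=(32, 64), attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(4, 3, 32, 32)
denoised = tiny_unet(noise, timestep=torch.tensor([10])).sample  # same shape as noise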
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : Union[str, Any] = (3_2, 3_2)
lowerCAmelCase_ : Any = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = torch.tensor([1_0] ).to(SCREAMING_SNAKE_CASE_ )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return (4, 3_2, 3_2)
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return (4, 3_2, 3_2)
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : int = {
'sample_size': 3_2,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (3_2, 6_4),
'attention_head_dim': 3_2,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
lowerCAmelCase_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ ,lowerCAmelCase_ : Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def SCREAMING_SNAKE_CASE__ ( self : str ):
# by default, model loading will use accelerate, as `low_cpu_mem_usage=True`
lowerCAmelCase_ ,lowerCAmelCase_ : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ )
model_accelerate.to(SCREAMING_SNAKE_CASE_ )
model_accelerate.eval()
lowerCAmelCase_ : Optional[int] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase_ : Tuple = noise.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = torch.tensor([1_0] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model_accelerate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=SCREAMING_SNAKE_CASE_ , low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ )
model_normal_load.to(SCREAMING_SNAKE_CASE_ )
model_normal_load.eval()
lowerCAmelCase_ : int = model_normal_load(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['sample']
assert torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : Tuple = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowerCAmelCase_ : Union[str, Any] = noise.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = torch.tensor([1_0] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
lowerCAmelCase_ : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase_ : Dict = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-3 ) )
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict=(3_2, 3_2) ):
lowerCAmelCase_ : str = 4
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
return (3, 3_2, 3_2)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return (3, 3_2, 3_2)
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
lowerCAmelCase_ : List[str] = self.dummy_input
return init_dict, inputs_dict
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = self.dummy_input
lowerCAmelCase_ : Dict = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = noise
lowerCAmelCase_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
assert image is not None, "Make sure output is not None"
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = 4
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Any = (2_5_6, 2_5_6)
lowerCAmelCase_ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
lowerCAmelCase_ : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase_ : int = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = 4
lowerCAmelCase_ : Dict = 3
lowerCAmelCase_ : List[str] = (3_2, 3_2)
lowerCAmelCase_ : Any = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
lowerCAmelCase_ : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase_ : List[str] = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
# fmt: on
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , rtol=1E-2 ) )
def SCREAMING_SNAKE_CASE__ ( self : int ):
# not required for this model
pass
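# Hedged note (an assumption based on the helper's name): the torch_all_close
# used throughout this file behaves like torch.allclose, i.e. a check passes
# when |a - b| <= atol + rtol * |b| element-wise.
import torch

a = torch.tensor([1.0000, 2.0000])
b = torch.tensor([1.0005, 2.0005])
assert torch.allclose(a, b, rtol=1e-3)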
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizer
_SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Any = '<pad>'
lowerCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_0_0_2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : int = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase_ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def SCREAMING_SNAKE_CASE__ ( self : int ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase_ : List[str] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = tempfile.mkdtemp()
lowerCAmelCase_ : int = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase_ : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase_ : str = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saves with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : str = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase_ : int = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase_ : List[str] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE__ ( self : int ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name )
lowerCAmelCase_ : Tuple = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = pickle.dumps(SCREAMING_SNAKE_CASE_ )
pickle.loads(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase_ : Dict = self.get_rust_tokenizer()
lowerCAmelCase_ : Tuple = 'I was born in 92000, and this is falsé.'
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self.get_rust_tokenizer()
lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Any = 'Hello World!'
lowerCAmelCase_ : Union[str, Any] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase_ : int = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
# fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
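# Hedged sketch of the fairseq offset exercised above: XLM-R reserves ids 0-3
# for <s>/<pad>/</s>/<unk>, which shifts every raw sentencepiece id by one
# ("fairseq_offset") before comparison in the tests.
spm_id = 285                      # id inside the sentencepiece model
fairseq_offset = 1                # the shift the tests add to expected ids
assert spm_id + fairseq_offset == 286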
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
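# Hedged usage sketch (assumes transformers is installed): with the lazy module
# in place, this import stays cheap; torch is only pulled in when one of the
# model classes listed above is first accessed.
from transformers.models.bigbird_pegasus import BigBirdPegasusConfig

config = BigBirdPegasusConfig()        # configuration objects need no torch
assert config.model_type == "bigbird_pegasus"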
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
__lowercase = [2, 4, 6, 8, 1_0, 1_2]
__lowercase = 1_0_0
self.assertEqual(kp.calc_profit(lowercase__ ,lowercase__ ,lowercase__ ) ,2_1_0 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
self.assertRaisesRegex(lowercase__ ,'''max_weight must greater than zero.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
self.assertRaisesRegex(lowercase__ ,'''Weight can not be negative.''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
self.assertRaisesRegex(lowercase__ ,'''Profit can not be negative.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
self.assertRaisesRegex(lowercase__ ,'''max_weight must greater than zero.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
self.assertRaisesRegex(
lowercase__ ,'''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
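# Hedged restatement of the greedy strategy under test (self-contained, since
# the module is only imported above as `kp`): sort items by profit/weight
# ratio, fill until max_weight is hit, taking a fraction of the last item.
profit = [10, 20, 30, 40, 50, 60]
weight = [2, 4, 6, 8, 10, 12]
order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
cap, total = 100, 0.0
for i in order:
    take = min(weight[i], cap)
    total += profit[i] * take / weight[i]
    cap -= take
assert total == 210   # the expected value asserted in the first test above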
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCamelCase ( A ):
'''simple docstring'''
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'tf_padding'))
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'depth_multiplier'))
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=1_3 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : int=3_2 , _lowerCAmelCase : List[Any]=0.25 , _lowerCAmelCase : Tuple=8 , _lowerCAmelCase : List[str]=8 , _lowerCAmelCase : List[Any]=6 , _lowerCAmelCase : List[str]=3_2 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : int="relu6" , _lowerCAmelCase : str=1_2_8_0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Optional[Any]=1_0 , _lowerCAmelCase : Any=None , ):
'''simple docstring'''
__lowercase =parent
__lowercase =batch_size
__lowercase =num_channels
__lowercase =image_size
__lowercase =depth_multiplier
__lowercase =depth_divisible_by
__lowercase =min_depth
__lowercase =expand_ratio
__lowercase =tf_padding
__lowercase =output_stride
__lowercase =first_layer_is_expansion
__lowercase =finegrained_output
__lowercase =hidden_act
__lowercase =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
__lowercase =classifier_dropout_prob
__lowercase =use_labels
__lowercase =is_training
__lowercase =num_labels
__lowercase =initializer_range
__lowercase =scope
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowercase =None
__lowercase =None
if self.use_labels:
__lowercase =ids_tensor([self.batch_size] , self.num_labels)
__lowercase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
__lowercase =self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCamelCase ( self : int):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str]):
'''simple docstring'''
__lowercase =MobileNetVaModel(config=_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : int):
'''simple docstring'''
__lowercase =self.num_labels
__lowercase =MobileNetVaForImageClassification(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =self.num_labels
__lowercase =MobileNetVaForSemanticSegmentation(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase =config_and_inputs
__lowercase ={'pixel_values': pixel_values}
return config, inputs_dict
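# Worked example (a sketch, not part of the tester): with image_size=32 and
# output_stride=32 as configured above, the spatial shape asserted on
# last_hidden_state is 32 // 32 = 1 pixel per side.
image_size, output_stride = 32, 32
assert image_size // output_stride == 1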
@require_torch
class _UpperCamelCase ( A , A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileNetVaModel,
"""image-classification""": MobileNetVaForImageClassification,
"""image-segmentation""": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =MobileNetVaModelTester(self)
__lowercase =MobileNetVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions')
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(_lowerCAmelCase)
__lowercase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase =[*signature.parameters.keys()]
__lowercase =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase)
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple):
__lowercase =model_class(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
with torch.no_grad():
__lowercase =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase))
__lowercase =outputs.hidden_states
__lowercase =1_6
self.assertEqual(len(_lowerCAmelCase) , _lowerCAmelCase)
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
__lowercase =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase)
@slow
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =MobileNetVaModel.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
def _A ( ):
"""simple docstring"""
__lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCamelCase ( self : str):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
)
@slow
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(_lowerCAmelCase)
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt').to(_lowerCAmelCase)
# forward pass
with torch.no_grad():
__lowercase =model(**_lowerCAmelCase)
# verify the logits
__lowercase =torch.Size((1, 1_0_0_1))
self.assertEqual(outputs.logits.shape , _lowerCAmelCase)
__lowercase =torch.tensor([0.2445, -1.1993, 0.1905]).to(_lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4))
@slow
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
__lowercase =model.to(_lowerCAmelCase)
__lowercase =MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
__lowercase =prepare_img()
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt').to(_lowerCAmelCase)
# forward pass
with torch.no_grad():
__lowercase =model(**_lowerCAmelCase)
__lowercase =outputs.logits
# verify the logits
__lowercase =torch.Size((1, 2_1, 6_5, 6_5))
self.assertEqual(logits.shape , _lowerCAmelCase)
__lowercase =torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1e-4))
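# Hedged post-processing sketch (an assumption about standard semantic
# segmentation practice, not shown in the test above): the (1, 21, 65, 65)
# logits are upsampled to the input resolution before a per-pixel argmax.
import torch

logits = torch.randn(1, 21, 65, 65)
upsampled = torch.nn.functional.interpolate(
    logits, size=(513, 513), mode="bilinear", align_corners=False)
seg_map = upsampled.argmax(dim=1)        # (1, 513, 513) class ids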
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AlbertTokenizer
lowerCAmelCase__ = AlbertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase =AlbertTokenizer(_lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase ='this is a test'
__lowercase ='this is a test'
return input_text, output_text
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='<pad>'
__lowercase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<pad>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '▁eloquent')
self.assertEqual(len(_lowerCAmelCase) , 3_0_0_0_0)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0)
def __lowerCamelCase ( self : int):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase =self.get_tokenizer()
__lowercase =self.get_rust_tokenizer()
__lowercase ='I was born in 92000, and this is falsé.'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
__lowercase =rust_tokenizer.tokenize(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.get_rust_tokenizer()
__lowercase =tokenizer.encode(_lowerCAmelCase)
__lowercase =rust_tokenizer.encode(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =AlbertTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase)
__lowercase =tokenizer.tokenize('This is a test')
self.assertListEqual(_lowerCAmelCase , ['▁this', '▁is', '▁a', '▁test'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [4_8, 2_5, 2_1, 1_2_8_9])
__lowercase =tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
_lowerCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'])
__lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase)
self.assertListEqual(_lowerCAmelCase , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9])
__lowercase =tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(
_lowerCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =AlbertTokenizer(_lowerCAmelCase)
__lowercase =tokenizer.encode('sequence builders')
__lowercase =tokenizer.encode('multi-sequence build')
__lowercase =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase)
__lowercase =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase ={'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
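# Hedged sketch of the special-token layout asserted above (ALBERT follows the
# BERT convention): [CLS] seq [SEP] for one sequence, [CLS] a [SEP] b [SEP]
# for a pair. Ids below are illustrative, not the real vocabulary.
cls_id, sep_id = 2, 3
text_a, text_b = [10, 11], [12, 13]
single = [cls_id] + text_a + [sep_id]
pair = [cls_id] + text_a + [sep_id] + text_b + [sep_id]
assert len(pair) == len(text_a) + len(text_b) + 3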
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase : str = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Dict = 48
UpperCAmelCase : Union[str, Any] = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase : Tuple = [6, 6, 6, 6]
UpperCAmelCase : Optional[Any] = 60
UpperCAmelCase : Union[str, Any] = [6, 6, 6, 6]
UpperCAmelCase : Tuple = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase : Union[str, Any] = 4
UpperCAmelCase : Optional[int] = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCAmelCase : Tuple = 1
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : Union[str, Any] = 1_26
UpperCAmelCase : str = 7
UpperCAmelCase : List[str] = 255.0
UpperCAmelCase : Optional[int] = ''
return config
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase : List[str] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
UpperCAmelCase : List[str] = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
UpperCAmelCase : Any = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
UpperCAmelCase : Union[str, Any] = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
UpperCAmelCase : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCAmelCase : List[str] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCAmelCase : List[str] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCAmelCase : List[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCAmelCase : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCAmelCase : Tuple = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
UpperCAmelCase : Dict = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
UpperCAmelCase : str = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
UpperCAmelCase : List[str] = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
UpperCAmelCase : Union[str, Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
UpperCAmelCase : Optional[Any] = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
UpperCAmelCase : List[str] = 'layernorm.weight'
if name == "norm.bias":
UpperCAmelCase : int = 'layernorm.bias'
if "conv_first" in name:
UpperCAmelCase : Union[str, Any] = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase : Dict = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase : int = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
UpperCAmelCase : Optional[Any] = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
UpperCAmelCase : List[str] = name.replace('upsample.2' , 'upsample.convolution_1' )
UpperCAmelCase : List[Any] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase : Tuple = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
UpperCAmelCase : Dict = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
UpperCAmelCase : List[Any] = 'swin2sr.' + name
return name
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
for key in orig_state_dict.copy().keys():
UpperCAmelCase : int = orig_state_dict.pop(UpperCAmelCase_ )
if "qkv" in key:
UpperCAmelCase : Optional[int] = key.split('.' )
UpperCAmelCase : Tuple = int(key_split[1] )
UpperCAmelCase : Union[str, Any] = int(key_split[4] )
UpperCAmelCase : Any = config.embed_dim
if "weight" in key:
UpperCAmelCase : Tuple = val[:dim, :]
UpperCAmelCase : List[str] = val[dim : dim * 2, :]
UpperCAmelCase : List[Any] = val[-dim:, :]
else:
UpperCAmelCase : str = val[:dim]
UpperCAmelCase : Any = val[dim : dim * 2]
UpperCAmelCase : Optional[Any] = val[-dim:]
pass
else:
UpperCAmelCase : Tuple = val
return orig_state_dict
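# Worked example (a sketch) of the qkv split performed above: a fused attention
# weight of shape (3 * dim, dim) is cut into equal query/key/value blocks.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused[:dim, :], fused[dim : 2 * dim, :], fused[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)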
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = get_config(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = SwinaSRForImageSuperResolution(UpperCAmelCase_ )
model.eval()
UpperCAmelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' )
UpperCAmelCase : Tuple = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : List[Any] = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(UpperCAmelCase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
UpperCAmelCase : Optional[Any] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
UpperCAmelCase : Any = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('RGB' )
UpperCAmelCase : Dict = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCAmelCase : Optional[Any] = 1_26 if 'Jpeg' in checkpoint_url else 2_56
UpperCAmelCase : List[str] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
UpperCAmelCase : Union[str, Any] = transforms(UpperCAmelCase_ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCAmelCase : Dict = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCAmelCase : List[Any] = model(UpperCAmelCase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCAmelCase : Any = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase : str = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase : int = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCAmelCase : List[Any] = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase : Any = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase : Optional[Any] = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase : str = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , UpperCAmelCase_ , atol=1E-3 )
print('Looks ok!' )
UpperCAmelCase : List[Any] = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
UpperCAmelCase : Optional[int] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
lowercase__ = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
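# Hedged usage sketch: invoking the conversion entry point above from the
# command line (the script file name below is an assumption; the URL is the
# argparse default declared above):
#
#   python convert_swin2sr_checkpoint.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64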
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
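# Sketch of the resulting import behaviour (illustrative): installing the
# _LazyModule keeps module import cheap, and the first attribute access
# triggers the deferred submodule import.
#   import transformers.models.xlm as xlm  # only the structure above is read
#   config = xlm.XLMConfig()               # first access imports configuration_xlm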
| 280
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 5
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Apply the SiLU (swish) activation x * sigmoid(x) elementwise."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
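# A quick sanity check of the two activations above (illustrative, not part of
# the original module; values rounded):
#   sigmoid(np.array([0.0]))              # -> array([0.5])
#   sigmoid_linear_unit(np.array([1.0]))  # -> array([0.73105858])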
| 285
| 0
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 1
|
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
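# Minimal usage sketch for the config wrapper above (the dictionary values are
# invented for illustration):
#   ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#   hf_ds_config = HfDeepSpeedConfig(ds_config)
#   hf_ds_config.get_value("zero_optimization.stage")  # -> 3
#   hf_ds_config.is_zero3()                            # -> True
#   hf_ds_config.is_offload()                          # -> True (param offload to "cpu")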
| 1
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
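# Why the score is computed this way (illustration with invented numbers): the
# model returns the mean negative log-likelihood per target token, so
# multiplying by the token count and negating recovers the sequence's total
# log-probability.
#   mean_nll, num_tokens = 0.5, 4
#   total_logprob = -(num_tokens * mean_nll)  # -> -2.0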
| 265
|
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation; int(b / 2) truncates toward zero so
    negative exponents also terminate."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
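# Note that actual_power above calls itself twice per level with the same
# argument, so it performs O(b) multiplications. A sketch of a variant that
# evaluates the half-power once, restoring the O(log b) bound usually expected
# from divide-and-conquer exponentiation (names are ours, not from the original):
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half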
| 265
| 1
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered combination of words from word_bank that concatenates to target."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 356
|
def max_product(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of numbers."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
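# Example calls for the helper above (expected values verified by hand):
#   max_product([2, 3, -2, 4])  # -> 6   (subarray [2, 3])
#   max_product([-2, 0, -1])    # -> 0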
| 50
| 0
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 52
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__lowerCamelCase : List[Any] = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__lowerCamelCase : Optional[int] = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
__lowerCamelCase : str = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
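# For example, normalize_answer("The  Cat!") returns "cat": the text is
# lowercased, punctuation and articles are stripped, and whitespace is collapsed.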
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]:
UpperCamelCase : Optional[Any] = len(references[0] )
if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
UpperCamelCase : Tuple = sacrebleu.corpus_bleu(
_lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 52
| 1
|
"""simple docstring"""
class EditDistance:
    """Levenshtein edit distance solved both top-down (memoized) and bottom-up (tabulated)."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
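# Example (illustrative): both strategies agree on the classic pair,
#   EditDistance().min_dist_bottom_up("kitten", "sitting")  # -> 3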
| 186
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
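# Worked example (PV = nRT with illustrative values): 1 mol at 300 K in 1 m^3
# gives pressure_of_gas_system(1, 300, 1) = 1 * 300 * 8.314462 ≈ 2494.34 Pa.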
| 186
| 1
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from this node of a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
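# With the sample scores above (height = 3) the tree evaluates to 65: the
# depth-2 maxima are 90, 33, 65 and 34423; the depth-1 minima are 33 and 65;
# the maximizing root then picks 65.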
| 205
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Estimate how quickly the point (x, y) escapes under the Mandelbrot iteration
    z -> z^2 + c; a return value of 1 means it never escaped within max_step steps."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280
| 0
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _UpperCAmelCase :
def __init__( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[int]=13 , lowercase_ : int=7 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : int=99 , lowercase_ : List[str]=32 , lowercase_ : Optional[Any]=5 , lowercase_ : List[str]=4 , lowercase_ : Tuple=37 , lowercase_ : Dict="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : int=50 , lowercase_ : int=0.02 , lowercase_ : str=True , lowercase_ : Any=None , ):
snake_case_ : Optional[int] = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Optional[Any] = seq_length
snake_case_ : Tuple = is_training
snake_case_ : Tuple = use_input_mask
snake_case_ : Optional[int] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : Dict = hidden_act
snake_case_ : Optional[Any] = hidden_dropout_prob
snake_case_ : List[Any] = attention_probs_dropout_prob
snake_case_ : Dict = max_position_embeddings
snake_case_ : Tuple = initializer_range
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = scope
def _snake_case ( self : Optional[int] ):
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_input_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, token_labels
def _snake_case ( self : Union[str, Any] ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _snake_case ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Any , **lowercase_ : Optional[Any] , ):
snake_case_ : str = BertGenerationEncoder(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
snake_case_ : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
snake_case_ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : str , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , **lowercase_ : str , ):
snake_case_ : Tuple = True
snake_case_ : Tuple = BertGenerationEncoder(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
snake_case_ : str = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )
snake_case_ : Optional[Any] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : str , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[str] , **lowercase_ : str , ):
snake_case_ : List[str] = True
snake_case_ : int = True
snake_case_ : Tuple = BertGenerationDecoder(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
# first forward pass
snake_case_ : Tuple = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ , )
snake_case_ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : int = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["""hidden_states"""][0]
snake_case_ : Any = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , )["""hidden_states"""][0]
# select random slice
snake_case_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def _snake_case ( self : str , lowercase_ : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : str , *lowercase_ : Dict , ):
snake_case_ : List[Any] = BertGenerationDecoder(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
snake_case_ : Dict = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Optional[int] ):
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase):
_lowerCAmelCase : Optional[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_lowerCAmelCase : Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
_lowerCAmelCase : Optional[Any] = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _snake_case ( self : Any ):
snake_case_ : Optional[int] = BertGenerationEncoderTester(self )
snake_case_ : int = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def _snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ):
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
snake_case_ : Dict = """bert"""
self.model_tester.create_and_check_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase_ )
def _snake_case ( self : Optional[int] ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase_ )
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _snake_case ( self : Any ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase_ )
@slow
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Optional[int] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : str = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
snake_case_ : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
snake_case_ : List[str] = model(lowerCamelCase_ )[0]
snake_case_ : Union[str, Any] = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase_ )
snake_case_ : Union[str, Any] = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : str ):
snake_case_ : Tuple = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
snake_case_ : str = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(lowerCamelCase_ )[0]
snake_case_ : Tuple = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase_ )
snake_case_ : List[Any] = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
| 365
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self : int , lowercase_ : int , lowercase_ : int=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=True , lowercase_ : List[str]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : Dict=5 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=37 , lowercase_ : str="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : int=16 , lowercase_ : List[str]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[str]=None , ):
snake_case_ : Optional[int] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : List[str] = use_input_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : str = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Tuple = initializer_range
snake_case_ : Any = num_labels
snake_case_ : Dict = num_choices
snake_case_ : str = scope
def _snake_case ( self : Dict ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_input_mask:
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = None
snake_case_ : str = None
snake_case_ : Any = None
if self.use_labels:
snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : List[str] ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _snake_case ( self : Any , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any ):
snake_case_ : List[Any] = OpenLlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , attention_mask=lowercase_ )
snake_case_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , ):
snake_case_ : List[str] = True
snake_case_ : Tuple = OpenLlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
snake_case_ : str = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
snake_case_ : Any = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Dict , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , ):
snake_case_ : Optional[int] = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str , ):
snake_case_ : int = True
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
snake_case_ : List[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
snake_case_ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : int = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
snake_case_ : Optional[int] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
snake_case_ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Optional[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowerCAmelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Union[str, Any] = False
def _snake_case ( self : List[Any] ):
snake_case_ : Any = OpenLlamaModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : List[Any] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : Tuple = type
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : Optional[int] ):
snake_case_, snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Dict = 3
snake_case_ : Dict = input_dict['''input_ids''']
snake_case_ : int = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : Tuple = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : Union[str, Any] ):
snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Dict = 3
snake_case_ : str = '''single_label_classification'''
snake_case_ : Tuple = input_dict['''input_ids''']
snake_case_ : Optional[int] = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case_ : Union[str, Any] = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self : Optional[Any] ):
snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = 3
snake_case_ : Optional[Any] = '''multi_label_classification'''
snake_case_ : Tuple = input_dict['''input_ids''']
snake_case_ : str = input_ids.ne(1 ).to(lowercase_ )
snake_case_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case_ : Any = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def _snake_case ( self : List[str] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _snake_case ( self : Tuple , lowercase_ : Dict ):
snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[str] = ids_tensor([1, 10] , config.vocab_size )
snake_case_ : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Any = OpenLlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
snake_case_ : Optional[Any] = original_model(lowercase_ ).last_hidden_state
snake_case_ : Optional[Any] = original_model(lowercase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case_ : Dict = {'''type''': scaling_type, '''factor''': 10.0}
snake_case_ : Union[str, Any] = OpenLlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
snake_case_ : str = scaled_model(lowercase_ ).last_hidden_state
snake_case_ : List[str] = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
| 155
| 0
|