"""Project Euler problem 191: count the "prize strings" -- attendance records
of a given length that never fail the attendance rules."""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string that has already failed the attendance rules can never
    # become a prize string, so it contributes zero
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of possible prize strings for the given number of days."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
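# A minimal sanity check for the recursion above (hedged: these are the values
# usually quoted for Project Euler problem 191; re-verify before relying on them):
#
#     >>> solution(4)
#     43
#     >>> solution(30)
#     1918080160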
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
"""Matrix-chain multiplication: find the order of multiplying a chain of
matrices that minimizes the number of scalar multiplications (the classic
dynamic-programming formulation)."""
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization recorded in ``optimal_solution``."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
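# For the sample chain above (dimensions 30x35, 35x15, 15x5, 5x10, 10x20,
# 20x25 -- the classic textbook instance), main() should report 15125 scalar
# multiplications and the parenthesization (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 )).
# Hedged: values taken from the standard worked example; re-check before relying on them.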
"""Find the peak of a unimodal (first increasing, then decreasing) list of
integers in O(log n) time by divide and conquer."""
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """Return the peak value of ``lst``."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
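# Example behaviour (hedged doctest-style sketch; the function assumes the
# input really is unimodal -- first increasing, then decreasing):
#
#     >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
#     5
#     >>> peak([1, 10, 9, 8, 7, 6])
#     10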
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS models: BERT-style hyperparameters plus the
    TAPAS fine-tuning and aggregation hyperparameters."""

    model_type = "tapas"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace ``original_name`` in ``key`` and renumber the block by subtracting ``offset``."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Fetch the COCO test image on which the converted model is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image

@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original PoolFormer weights into our PoolFormer structure."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
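# Example invocation (hedged sketch: the script name and paths below are
# placeholders, only the flags are defined by the parser above):
#
#     python convert_poolformer_original_to_pytorch.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path /path/to/dump_folder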
"""Convert a BigBirdPegasus TensorFlow checkpoint into a Hugging Face PyTorch model."""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
END_COMMON = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
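# Example invocation (hedged sketch: the script name and paths are placeholders;
# only --tf_ckpt_path and --save_dir are defined by the parser above):
#
#     python convert_bigbird_pegasus_tf_checkpoint_to_pytorch.py \
#         --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#         --save_dir /path/to/save_dir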
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match"
        )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    """A video image processor that resizes, centre-crops, rescales and normalizes each frame."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
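# Example usage (hedged sketch: "google/vivit-b-16x2-kinetics400" is one public
# checkpoint that ships an image processor of this type, but verify the name
# before relying on it):
#
#     from transformers import VivitImageProcessor
#     processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
#     inputs = processor(video_frames, return_tensors="pt")  # video_frames: list of PIL frames
#     pixel_values = inputs["pixel_values"]  # (batch, frames, channels, height, width)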
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(*, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` subclass that builds its arguments from the type hints of dataclasses."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
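# Example usage (hedged sketch of the intended API, not an official snippet):
#
#     @dataclasses.dataclass
#     class TrainingArguments:
#         learning_rate: float = 5e-5
#         do_train: bool = False
#
#     parser = HfArgumentParser(TrainingArguments)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "1e-4", "--do_train"]
#     )
#     assert training_args.learning_rate == 1e-4 and training_args.do_train is True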
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Dijkstra's two-stack algorithm for evaluating fully parenthesized
arithmetic expressions."""
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate ``equation`` using one stack for operands and one for operators."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop an operator and two
            # operands, apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
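# The algorithm above assumes a fully parenthesized infix expression with
# single-digit operands (multi-digit numbers and operator precedence are not
# handled). A hedged doctest-style sketch:
#
#     >>> dijkstras_two_stack_algorithm("((3 + 4) * 7)")
#     49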
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel values and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32_128, d_model=512, d_kv=64, d_ff=2_048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 99 |
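As a quick sanity check on the configuration above, a minimal usage sketch, assuming the standard transformers package is installed (the sizes below are arbitrary illustrations):

from transformers import T5Config

config = T5Config(d_model=512, num_layers=6, num_heads=8, feed_forward_proj="gated-gelu")
print(config.num_decoder_layers)  # 6 -- mirrors num_layers when left unset
print(config.dense_act_fn)        # "gelu_new" -- the backwards-compatible alias for "gated-gelu"
print(config.is_gated_act)        # True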
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The exact value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 99 | 1 |
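A short driver for the estimators above; the sample count of 100_000 is an arbitrary accuracy/runtime trade-off:

pi_estimator(100_000)                         # prints an estimate near 3.14159
area_under_line_estimator_check(100_000)      # area under y=x on [0, 1] -> ~0.5
pi_estimator_using_area_under_curve(100_000)  # quarter-circle of radius 2 -> ~pi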
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : str = ConsistencyModelPipeline
_lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_lowercase : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
@property
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : str =UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : List[str] =UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def _lowercase ( self , lowerCAmelCase__=False ) -> List[str]:
'''simple docstring'''
if class_cond:
a__ : int =self.dummy_cond_unet
else:
a__ : Union[str, Any] =self.dummy_uncond_unet
# Default to CM multistep sampler
a__ : int =CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
a__ : Tuple ={
"unet": unet,
"scheduler": scheduler,
}
return components
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Any:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
a__ : Optional[Any] =torch.manual_seed(lowerCAmelCase__ )
else:
a__ : Dict =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a__ : Optional[int] ={
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [2_2, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : Union[str, Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
a__ : int =self.get_dummy_components()
a__ : Tuple =ConsistencyModelPipeline(**lowerCAmelCase__ )
a__ : Any =pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Any =self.get_dummy_inputs(lowerCAmelCase__ )
a__ : List[Any] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Union[str, Any] =image[0, -3:, -3:, -1]
a__ : Any =np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Any ="cpu" # ensure determinism for the device-dependent torch.Generator
a__ : str =self.get_dummy_components(class_cond=lowerCAmelCase__ )
a__ : Any =ConsistencyModelPipeline(**lowerCAmelCase__ )
a__ : List[str] =pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : List[Any] =self.get_dummy_inputs(lowerCAmelCase__ )
a__ : Any =0
a__ : Optional[int] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : int =image[0, -3:, -3:, -1]
a__ : Any =np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Optional[int] =self.get_dummy_components()
a__ : Tuple =ConsistencyModelPipeline(**lowerCAmelCase__ )
a__ : List[Any] =pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : str =self.get_dummy_inputs(lowerCAmelCase__ )
a__ : Union[str, Any] =1
a__ : List[Any] =None
a__ : List[str] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Dict =image[0, -3:, -3:, -1]
a__ : Any =np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[Any] ="cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Optional[int] =self.get_dummy_components(class_cond=lowerCAmelCase__ )
a__ : Tuple =ConsistencyModelPipeline(**lowerCAmelCase__ )
a__ : Union[str, Any] =pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Union[str, Any] =self.get_dummy_inputs(lowerCAmelCase__ )
a__ : Dict =1
a__ : List[Any] =None
a__ : int =0
a__ : List[str] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 3_2, 3_2, 3)
a__ : int =image[0, -3:, -3:, -1]
a__ : Optional[int] =np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=(1, 3, 6_4, 6_4) ) -> Union[str, Any]:
'''simple docstring'''
a__ : Any =torch.manual_seed(lowerCAmelCase__ )
a__ : Dict ={
"num_inference_steps": None,
"timesteps": [2_2, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
a__ : Union[str, Any] =self.get_fixed_latents(seed=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ , shape=lowerCAmelCase__ )
a__ : Any =latents
return inputs
def _lowercase ( self , lowerCAmelCase__=0 , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=(1, 3, 6_4, 6_4) ) -> List[str]:
'''simple docstring'''
if type(lowerCAmelCase__ ) == str:
a__ : List[str] =torch.device(lowerCAmelCase__ )
a__ : str =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a__ : Any =randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
return latents
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[Any] =UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
a__ : Union[str, Any] =CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
a__ : int =ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Dict =self.get_inputs()
a__ : List[str] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : Any =image[0, -3:, -3:, -1]
a__ : Optional[Any] =np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
a__ : str =CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
a__ : List[Any] =ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : int =self.get_inputs()
a__ : Optional[Any] =1
a__ : Tuple =None
a__ : List[str] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : List[str] =image[0, -3:, -3:, -1]
a__ : List[Any] =np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : str =UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
a__ : List[Any] =CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
a__ : Union[str, Any] =ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : List[str] =self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ):
a__ : Optional[int] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : str =image[0, -3:, -3:, -1]
a__ : Any =np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
a__ : Dict =CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
a__ : Any =ConsistencyModelPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
pipe.to(torch_device=lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Dict =self.get_inputs(get_fixed_latents=lowerCAmelCase__ , device=lowerCAmelCase__ )
a__ : Optional[Any] =1
a__ : Union[str, Any] =None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase__ , enable_math=lowerCAmelCase__ , enable_mem_efficient=lowerCAmelCase__ ):
a__ : Optional[Any] =pipe(**lowerCAmelCase__ ).images
assert image.shape == (1, 6_4, 6_4, 3)
a__ : int =image[0, -3:, -3:, -1]
a__ : int =np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 95 |
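For context, the pipeline exercised by the slow tests above can be driven directly. A minimal sampling sketch, reusing the checkpoint path, scheduler settings and two-step timesteps that appear in the tests (everything else here is illustrative):

import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# Two-step sampling at the timesteps used in the slow tests.
image = pipe(
    num_inference_steps=None,
    timesteps=[22, 0],
    class_labels=0,
    generator=torch.manual_seed(0),
    output_type="np",
).images[0]
print(image.shape)  # (64, 64, 3)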
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 95 | 1 |
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 6 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a__( lowerCamelCase__ ):
lowercase__ = ["""vqvae"""]
def __init__( self : int , __snake_case : AutoencoderKL , __snake_case : UNetaDConditionModel , __snake_case : Mel , __snake_case : Union[DDIMScheduler, DDPMScheduler] , ):
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case , mel=__snake_case , vqvae=__snake_case )
def lowercase_ ( self : str ):
return 50 if isinstance(self.scheduler , __snake_case ) else 10_00
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : int = 1 , __snake_case : str = None , __snake_case : np.ndarray = None , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = None , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : float = 0 , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : torch.Tensor = None , __snake_case : torch.Tensor = None , __snake_case : Union[str, Any]=True , ):
a : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(__snake_case )
a : int = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
a : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__snake_case , device=self.device , )
a : str = noise
a : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__snake_case , __snake_case )
a : str = self.mel.audio_slice_to_image(__snake_case )
a : Union[str, Any] = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
a : Optional[int] = (input_image / 2_55) * 2 - 1
a : str = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
a : int = self.vqvae.encode(torch.unsqueeze(__snake_case , 0 ) ).latent_dist.sample(
generator=__snake_case )[0]
a : Any = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : List[Any] = self.scheduler.add_noise(__snake_case , __snake_case , self.scheduler.timesteps[start_step - 1] )
a : int = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : str = int(mask_start_secs * pixels_per_second )
a : List[str] = int(mask_end_secs * pixels_per_second )
a : str = self.scheduler.add_noise(__snake_case , __snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __snake_case ):
a : int = self.unet(__snake_case , __snake_case , __snake_case )['sample']
else:
a : List[str] = self.unet(__snake_case , __snake_case )['sample']
if isinstance(self.scheduler , __snake_case ):
a : Union[str, Any] = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , eta=__snake_case , generator=__snake_case , )['prev_sample']
else:
a : Dict = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : Union[str, Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
a : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : Any = 1 / self.vqvae.config.scaling_factor * images
a : List[Any] = self.vqvae.decode(__snake_case )['sample']
a : Any = (images / 2 + 0.5).clamp(0 , 1 )
a : str = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
a : List[Any] = (images * 2_55).round().astype('uint8' )
a : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__snake_case , mode='RGB' ).convert('L' ) for _ in images) )
a : Dict = [self.mel.image_to_audio(__snake_case ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(__snake_case ) )
@torch.no_grad()
def lowercase_ ( self : str , __snake_case : List[Image.Image] , __snake_case : int = 50 ):
assert isinstance(self.scheduler , __snake_case )
self.scheduler.set_timesteps(__snake_case )
a : List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
a : Union[str, Any] = (sample / 2_55) * 2 - 1
a : str = torch.Tensor(__snake_case ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : List[Any] = self.scheduler.alphas_cumprod[t]
a : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : Optional[Any] = 1 - alpha_prod_t
a : int = self.unet(__snake_case , __snake_case )['sample']
a : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : int = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : List[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        # Spherical linear interpolation between two tensors treated as flat vectors.
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
| 297 |
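The static helper at the end of the pipeline is spherical linear interpolation (slerp); a self-contained sketch of the same formula, as used for interpolating between two noise tensors:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two tensors viewed as flat vectors.
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


noise_a, noise_b = torch.randn(8), torch.randn(8)
halfway = slerp(noise_a, noise_b, 0.5)  # halfway along the great-circle arc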
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # bits that overflow at this position
        first ^= second         # sum without the carry
        second = carry << 1     # carry shifted into the next position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 297 | 1 |
"""simple docstring"""
def lowercase ( A_ = 50 )-> str:
'''simple docstring'''
a : Optional[Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 356 |
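Small cases are easy to verify by hand; with S for a unit square and digits for tile lengths, length 4 has eight fillings (SSSS, SS2, S2S, 2SS, 22, S3, 3S, 4):

assert solution(2) == 2   # SS, 2
assert solution(3) == 4   # SSS, S2, 2S, 3
assert solution(4) == 8
assert solution(5) == 15  # follows the recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)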
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : str = """char"""
UpperCAmelCase : Optional[Any] = """bpe"""
UpperCAmelCase : Optional[Any] = """wp"""
__lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = ["""image_processor""", """char_tokenizer"""]
UpperCAmelCase : Optional[Any] = """ViTImageProcessor"""
UpperCAmelCase : List[Any] = """MgpstrTokenizer"""
def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : str):
a : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
a : List[str] = kwargs.pop("feature_extractor")
a : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
a : Union[str, Any] = tokenizer
a : int = AutoTokenizer.from_pretrained("gpt2")
a : str = AutoTokenizer.from_pretrained("bert-base-uncased")
super().__init__(__UpperCAmelCase , __UpperCAmelCase)
def __call__( self : str , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : int):
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
if images is not None:
a : List[str] = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase)
if text is not None:
a : Optional[Any] = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase)
if text is None:
return inputs
elif images is None:
return encodings
else:
a : Any = encodings["input_ids"]
return inputs
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str]):
a , a , a : Tuple = sequences
a : Optional[int] = char_preds.size(0)
a , a : Dict = self._decode_helper(__UpperCAmelCase , "char")
a , a : Dict = self._decode_helper(__UpperCAmelCase , "bpe")
a , a : Union[str, Any] = self._decode_helper(__UpperCAmelCase , "wp")
a : Any = []
a : Union[str, Any] = []
for i in range(__UpperCAmelCase):
a : Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
a : Optional[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]]
a : List[str] = scores.index(max(__UpperCAmelCase))
final_strs.append(strs[max_score_index])
final_scores.append(scores[max_score_index])
a : Dict = {}
a : List[str] = final_strs
a : str = final_scores
a : int = char_strs
a : int = bpe_strs
a : Tuple = wp_strs
return out
def __snake_case ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]):
if format == DecodeType.CHARACTER:
a : int = self.char_decode
a : int = 1
a : Dict = "[s]"
elif format == DecodeType.BPE:
a : List[str] = self.bpe_decode
a : List[str] = 2
a : int = "#"
elif format == DecodeType.WORDPIECE:
a : Union[str, Any] = self.wp_decode
a : List[str] = 102
a : int = "[SEP]"
else:
raise ValueError(f'''Format {format} is not supported.''')
a , a : str = [], []
a : Optional[int] = pred_logits.size(0)
a : List[str] = pred_logits.size(1)
a , a : Tuple = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase)
a : List[str] = preds_index.view(-1 , __UpperCAmelCase)[:, 1:]
a : Any = decoder(__UpperCAmelCase)
a , a : Union[str, Any] = torch.nn.functional.softmax(__UpperCAmelCase , dim=2).max(dim=2)
a : Union[str, Any] = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase):
a : str = preds_str[index].find(__UpperCAmelCase)
a : Optional[Any] = preds_str[index][:pred_eos]
a : Optional[int] = preds_index[index].cpu().tolist()
a : Optional[int] = pred_index.index(__UpperCAmelCase) if eos_token in pred_index else -1
a : List[str] = preds_max_prob[index][: pred_eos_index + 1]
a : int = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase)
conf_scores.append(__UpperCAmelCase)
return dec_strs, conf_scores
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Any):
a : Dict = [seq.replace(" " , "") for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase)]
return decode_strs
def __snake_case ( self : Optional[int] , __UpperCAmelCase : List[str]):
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
a : Any = [seq.replace(" " , "") for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase)]
return decode_strs
| 226 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 334 |
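For example, 2 mol at 100 K in 5 m^3 exerts 2 * 100 * 8.314462 / 5 ≈ 332.58 Pa:

print(pressure_of_gas_system(2, 100, 5))   # 332.57848 (Pa)
print(volume_of_gas_system(1, 300, 100))   # 24.943386 (m^3 at 100 Pa)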
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
| 334 | 1 |
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 356 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 0 |
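The integration test above boils down to a short inference recipe; a cleaned-up sketch using the same checkpoint and image fixture paths that appear in the test:

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

image_processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000])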
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Dict = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Union[str, Any] = """ctrl"""
_lowerCAmelCase : List[Any] = ["""past_key_values"""]
_lowerCAmelCase : Optional[Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , lowercase_ : Tuple=246534 , lowercase_ : Union[str, Any]=256 , lowercase_ : Optional[int]=1280 , lowercase_ : Optional[int]=8192 , lowercase_ : Tuple=48 , lowercase_ : Tuple=16 , lowercase_ : Any=0.1 , lowercase_ : Any=0.1 , lowercase_ : Any=1E-6 , lowercase_ : Optional[int]=0.02 , lowercase_ : Any=True , **lowercase_ : int , ):
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Tuple = n_positions
snake_case_ : Tuple = n_embd
snake_case_ : Optional[Any] = n_layer
snake_case_ : Tuple = n_head
snake_case_ : Dict = dff
snake_case_ : Optional[int] = resid_pdrop
snake_case_ : Union[str, Any] = embd_pdrop
snake_case_ : str = layer_norm_epsilon
snake_case_ : str = initializer_range
snake_case_ : int = use_cache
super().__init__(**lowercase_ )
| 264 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : str = XLMRobertaTokenizer
_lowerCAmelCase : int = XLMRobertaTokenizerFast
_lowerCAmelCase : str = True
_lowerCAmelCase : Dict = True
def _snake_case ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[str] = XLMRobertaTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : str ):
snake_case_ : List[Any] = '''<pad>'''
snake_case_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowercase_ ) , 1002 )
def _snake_case ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = XLMRobertaTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _snake_case ( self : List[str] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : int = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Tuple = tokenizer_r.save_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[Any] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[str] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : List[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[Any] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : Tuple = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Optional[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : Dict = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@cached_property
def _snake_case ( self : List[str] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _snake_case ( self : Optional[Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name )
snake_case_ : Any = XLMRobertaTokenizer(f.name , keep_accents=lowercase_ )
snake_case_ : List[Any] = pickle.dumps(lowercase_ )
pickle.loads(lowercase_ )
def _snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_rust_tokenizer()
snake_case_ : Dict = '''I was born in 92000, and this is falsé.'''
snake_case_ : Optional[int] = tokenizer.tokenize(lowercase_ )
snake_case_ : Tuple = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case_ : str = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ : Any = tokenizer.encode(lowercase_ )
snake_case_ : int = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Tuple ):
snake_case_ : int = '''Hello World!'''
snake_case_ : int = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case_ : Optional[int] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : Dict ):
# fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 264 | 1 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies polynomial warmup before handing off to a wrapped decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup: while global_step < warmup_steps, the
            # learning rate is `(global_step / num_warmup_steps) ** power * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
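# Illustrative sketch (assumes TensorFlow 2.x eager mode): with a 10-step
# warmup wrapping a constant 1e-3 schedule, step 5 yields half the peak rate.
_warmup_demo = WarmUp(
    initial_learning_rate=1e-3,
    decay_schedule_fn=lambda step: tf.constant(1e-3),
    warmup_steps=10,
)
assert abs(float(_warmup_demo(5)) - 5e-4) < 1e-9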
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9,
                     adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None,
                     weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """Creates an optimizer whose learning rate warms up and then decays polynomially."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule,
                             warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1,
            beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon,
            clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
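# Usage sketch (illustrative values): a 1000-step run with 100 warmup steps;
# the returned schedule can be inspected on its own, e.g. the rate at step 50
# is half of the 5e-5 peak.
_demo_optimizer, _demo_schedule = create_optimizer(
    init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01)
assert abs(float(_demo_schedule(50)) - 2.5e-5) < 1e-10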
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied to each variable before its Adam update."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False,
                 weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None,
                 name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with the WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to decay `param_name`; include patterns take precedence over exclude patterns."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
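# Illustrative check: variables whose names match an exclude pattern are
# skipped by the decoupled weight decay, everything else is decayed.
_wd_demo = AdamWeightDecay(weight_decay_rate=0.01, exclude_from_weight_decay=["LayerNorm", "bias"])
assert _wd_demo._do_use_weight_decay("encoder/dense/kernel:0")
assert not _wd_demo._do_use_weight_decay("encoder/dense/bias:0")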
class GradientAccumulator:
    """Accumulates gradients over several mini-batches so they can be applied in one optimizer step."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
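# Usage sketch (illustrative): accumulate two micro-batch gradients, inspect
# the running sum, then reset. In real training the summed gradients would be
# scaled and passed to `optimizer.apply_gradients`.
_accumulator = GradientAccumulator()
_accumulator([tf.constant([1.0, 2.0])])
_accumulator([tf.constant([3.0, 4.0])])
assert _accumulator.gradients[0].numpy().tolist() == [4.0, 6.0]
assert int(_accumulator.step) == 2
_accumulator.reset()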
| 184 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lt_to_rb_diag_product = 0
    rt_to_lb_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lt_to_rb_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal (/) product
            if i > 2:
                rt_to_lb_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lt_to_rb_diag_product, rt_to_lb_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
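# Worked example (illustrative): in [[1, 2, 3, 4]] * 4 every column i holds the
# constant i + 1, so the best vertical product is 4 ** 4 = 256, beating every
# horizontal and diagonal product (all 1 * 2 * 3 * 4 = 24).
assert largest_product([[1, 2, 3, 4]] * 4) == 256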
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 184 | 1 |
def bead_sort(sequence: list) -> list:
    """Bead sort ("gravity sort") for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Let the difference "fall" one position down the pair.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
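    # Additional illustrative cases: already-sorted input passes through
    # unchanged and duplicates are preserved.
    assert bead_sort([1, 2, 3]) == [1, 2, 3]
    assert bead_sort([2, 2, 1]) == [1, 2, 2]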
| 175 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer
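# Worked example (illustrative): greedily making change for 987 from Indian
# denominations takes the largest coin possible at every step.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]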
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
lowerCamelCase : Any =True
except ImportError:
lowerCamelCase : int =False
try:
from torch.hub import _get_torch_home
lowerCamelCase : Optional[int] =_get_torch_home()
except ImportError:
lowerCamelCase : Optional[Any] =os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
lowerCamelCase : Optional[Any] =os.path.join(torch_cache_home, '''transformers''')
lowerCamelCase : Optional[Any] ='''https://cdn.huggingface.co'''
lowerCamelCase : int ='''https://s3.amazonaws.com/models.huggingface.co/bert'''
lowerCamelCase : Dict ='''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
lowerCamelCase : str =os.path.join(PATH, '''config.yaml''')
lowerCamelCase : str =os.path.join(PATH, '''attributes.txt''')
lowerCamelCase : str =os.path.join(PATH, '''objects.txt''')
lowerCamelCase : List[Any] =os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
lowerCamelCase : str =os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
lowerCamelCase : str =os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
lowerCamelCase : Any ='''pytorch_model.bin'''
lowerCamelCase : str ='''config.yaml'''
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase=OBJECTS , __lowerCAmelCase=ATTRIBUTES ) -> Optional[int]:
UpperCamelCase__ : Tuple = []
with open(__lowerCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
UpperCamelCase__ : str = []
with open(__lowerCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
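# Format note (illustrative): both label files are comma-separated text with
# one entry per line, and only the first field of each line is kept, so a line
# such as "traffic light,stoplight" contributes "traffic light" to the list.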
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
UpperCamelCase__ : Optional[Any] = OrderedDict()
with open(__lowerCAmelCase , "rb" ) as f:
UpperCamelCase__ : Optional[Any] = pkl.load(__lowerCAmelCase )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
UpperCamelCase__ : Union[str, Any] = ckp.pop(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , np.ndarray ):
UpperCamelCase__ : str = torch.tensor(__lowerCAmelCase )
else:
assert isinstance(__lowerCAmelCase , torch.tensor ), type(__lowerCAmelCase )
UpperCamelCase__ : List[Any] = v
return r
class __a :
_lowerCAmelCase : List[Any] = {}
def __init__( self : Dict , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : str = "root" , SCREAMING_SNAKE_CASE : List[str]=0 ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = name
UpperCamelCase__ : List[str] = level
UpperCamelCase__ : Dict = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCamelCase__ : Optional[int] = copy.deepcopy(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[Any] = Config(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE , level=level + 1 )
UpperCamelCase__ : Dict = v
setattr(self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = d
def __repr__( self : int ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = val
UpperCamelCase__ : Any = val
UpperCamelCase__ : Any = key.split("." )
UpperCamelCase__ : List[str] = len(SCREAMING_SNAKE_CASE ) - 1
UpperCamelCase__ : Optional[int] = self._pointer
if len(SCREAMING_SNAKE_CASE ) > 1:
for i, l in enumerate(SCREAMING_SNAKE_CASE ):
if hasattr(self , SCREAMING_SNAKE_CASE ) and isinstance(getattr(self , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
setattr(getattr(self , SCREAMING_SNAKE_CASE ) , ".".join(levels[i:] ) , SCREAMING_SNAKE_CASE )
if l == last_level:
UpperCamelCase__ : Union[str, Any] = val
else:
UpperCamelCase__ : Any = pointer[l]
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self._pointer
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
with open(F'{file_name}' , "w" ) as stream:
dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
with open(F'{file_name}' , "w" ) as stream:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@staticmethod
def __lowercase ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE ) as stream:
UpperCamelCase__ : int = load(SCREAMING_SNAKE_CASE , Loader=SCREAMING_SNAKE_CASE )
return data
def __str__( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = " "
if self._name != "root":
UpperCamelCase__ : int = F'{t * (self._level-1)}{self._name}:\n'
else:
UpperCamelCase__ : Optional[int] = ""
UpperCamelCase__ : Union[str, Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
r += F'{t * (self._level)}{v}\n'
self._level += 1
else:
r += F'{t * (self._level)}{k}: {v} ({type(SCREAMING_SNAKE_CASE ).__name__})\n'
UpperCamelCase__ : Optional[Any] = level
return r[:-1]
@classmethod
def __lowercase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ : Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return cls(SCREAMING_SNAKE_CASE )
@classmethod
def __lowercase ( cls : str , SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = kwargs.pop("cache_dir" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = kwargs.pop("force_download" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = kwargs.pop("resume_download" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = kwargs.pop("proxies" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = kwargs.pop("local_files_only" , SCREAMING_SNAKE_CASE )
if os.path.isdir(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Tuple = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif os.path.isfile(SCREAMING_SNAKE_CASE ) or is_remote_url(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[int] = pretrained_model_name_or_path
else:
UpperCamelCase__ : Optional[Any] = hf_bucket_url(SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , use_cdn=SCREAMING_SNAKE_CASE )
try:
# Load from URL or cache if already cached
UpperCamelCase__ : str = cached_path(
SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCamelCase__ : Union[str, Any] = Config.load_yaml(SCREAMING_SNAKE_CASE )
except EnvironmentError:
UpperCamelCase__ : Union[str, Any] = "Can't load config for"
raise EnvironmentError(SCREAMING_SNAKE_CASE )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(SCREAMING_SNAKE_CASE ), kwargs
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
UpperCamelCase__ : Tuple = torch.load("dump.pt" , map_location=in_tensor.device )
UpperCamelCase__ : Union[str, Any] = in_tensor.numpy()
UpperCamelCase__ : List[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.0_1 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.0_1 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
UpperCamelCase__ : Union[str, Any] = urlparse(__lowerCAmelCase )
return parsed.scheme in ("http", "https")
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> str:
UpperCamelCase__ : List[str] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
UpperCamelCase__ : int = "/" not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
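# Illustrative URL shapes produced above (example model ids):
#   a flat id uses the legacy "-"-joined path:
#       https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin
#   a namespaced id nests under its owner:
#       https://cdn.huggingface.co/dbmdz/bert-base-german-cased/pytorch_model.bin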
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=0 , __lowerCAmelCase=None , ) -> Optional[Any]:
UpperCamelCase__ : str = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + "; ".join("{}/{}".format(__lowerCAmelCase , __lowerCAmelCase ) for k, v in user_agent.items() )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
ua += "; " + user_agent
UpperCamelCase__ : int = {"user-agent": ua}
if resume_size > 0:
UpperCamelCase__ : Union[str, Any] = "bytes=%d-" % (resume_size,)
UpperCamelCase__ : Union[str, Any] = requests.get(__lowerCAmelCase , stream=__lowerCAmelCase , proxies=__lowerCAmelCase , headers=__lowerCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
UpperCamelCase__ : Dict = response.headers.get("Content-Length" )
UpperCamelCase__ : Any = resume_size + int(__lowerCAmelCase ) if content_length is not None else None
UpperCamelCase__ : Optional[Any] = tqdm(
unit="B" , unit_scale=__lowerCAmelCase , total=__lowerCAmelCase , initial=__lowerCAmelCase , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__lowerCAmelCase ) )
temp_file.write(__lowerCAmelCase )
progress.close()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=10 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , ) -> List[Any]:
if cache_dir is None:
UpperCamelCase__ : List[str] = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ : Union[str, Any] = str(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
UpperCamelCase__ : List[Any] = None
if not local_files_only:
try:
UpperCamelCase__ : Any = requests.head(__lowerCAmelCase , allow_redirects=__lowerCAmelCase , proxies=__lowerCAmelCase , timeout=__lowerCAmelCase )
if response.status_code == 200:
UpperCamelCase__ : Tuple = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCamelCase__ : Optional[int] = url_to_filename(__lowerCAmelCase , __lowerCAmelCase )
# get cache path to put the file
UpperCamelCase__ : List[str] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__lowerCAmelCase ):
return cache_path
else:
UpperCamelCase__ : Tuple = [
file
for file in fnmatch.filter(os.listdir(__lowerCAmelCase ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(__lowerCAmelCase ) > 0:
return os.path.join(__lowerCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(__lowerCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCamelCase__ : Tuple = cache_path + ".lock"
with FileLock(__lowerCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__lowerCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCamelCase__ : str = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(__lowerCAmelCase , "a+b" ) as f:
yield f
UpperCamelCase__ : Optional[int] = _resumable_file_manager
if os.path.exists(__lowerCAmelCase ):
UpperCamelCase__ : List[Any] = os.stat(__lowerCAmelCase ).st_size
else:
UpperCamelCase__ : Union[str, Any] = 0
else:
UpperCamelCase__ : Any = partial(tempfile.NamedTemporaryFile , dir=__lowerCAmelCase , delete=__lowerCAmelCase )
UpperCamelCase__ : Optional[Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , __lowerCAmelCase , temp_file.name , )
http_get(
__lowerCAmelCase , __lowerCAmelCase , proxies=__lowerCAmelCase , resume_size=__lowerCAmelCase , user_agent=__lowerCAmelCase , )
os.replace(temp_file.name , __lowerCAmelCase )
UpperCamelCase__ : Optional[int] = {"url": url, "etag": etag}
UpperCamelCase__ : Optional[Any] = cache_path + ".json"
with open(__lowerCAmelCase , "w" ) as meta_file:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
return cache_path
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[Any]:
UpperCamelCase__ : int = url.encode("utf-8" )
UpperCamelCase__ : Any = shaaaa(__lowerCAmelCase )
UpperCamelCase__ : List[str] = url_hash.hexdigest()
if etag:
UpperCamelCase__ : Tuple = etag.encode("utf-8" )
UpperCamelCase__ : Union[str, Any] = shaaaa(__lowerCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
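# Cache-naming sketch for the function above: the filename is the hex sha256 of
# the URL, extended with "." plus the hex sha256 of the ETag when one is known,
# and a trailing ".h5" from the URL is kept so weight files stay recognizable.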
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> List[str]:
if cache_dir is None:
UpperCamelCase__ : Optional[int] = TRANSFORMERS_CACHE
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ : List[str] = str(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ : Optional[int] = str(__lowerCAmelCase )
if is_remote_url(__lowerCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
UpperCamelCase__ : int = get_from_cache(
__lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , user_agent=__lowerCAmelCase , local_files_only=__lowerCAmelCase , )
elif os.path.exists(__lowerCAmelCase ):
# File, and it exists.
UpperCamelCase__ : str = url_or_filename
elif urlparse(__lowerCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(__lowerCAmelCase ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(__lowerCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(__lowerCAmelCase ) and not tarfile.is_tarfile(__lowerCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCamelCase__ , UpperCamelCase__ : Tuple = os.path.split(__lowerCAmelCase )
UpperCamelCase__ : str = output_file.replace("." , "-" ) + "-extracted"
UpperCamelCase__ : str = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isdir(__lowerCAmelCase ) and os.listdir(__lowerCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCamelCase__ : Union[str, Any] = output_path + ".lock"
with FileLock(__lowerCAmelCase ):
shutil.rmtree(__lowerCAmelCase , ignore_errors=__lowerCAmelCase )
os.makedirs(__lowerCAmelCase )
if is_zipfile(__lowerCAmelCase ):
with ZipFile(__lowerCAmelCase , "r" ) as zip_file:
zip_file.extractall(__lowerCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(__lowerCAmelCase ):
UpperCamelCase__ : List[Any] = tarfile.open(__lowerCAmelCase )
tar_file.extractall(__lowerCAmelCase )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(__lowerCAmelCase ) )
return output_path_extracted
return output_path
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="," ) -> Any:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as f:
UpperCamelCase__ : Tuple = eval(f.read() )
else:
UpperCamelCase__ : List[Any] = requests.get(__lowerCAmelCase )
try:
UpperCamelCase__ : str = req.json()
except Exception:
UpperCamelCase__ : List[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
UpperCamelCase__ : Union[str, Any] = eval(__lowerCAmelCase )
except Exception:
UpperCamelCase__ : List[Any] = data.split("\n" )
req.close()
return data
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]:
UpperCamelCase__ : str = requests.get(__lowerCAmelCase )
UpperCamelCase__ : Any = np.array(Image.open(BytesIO(response.content ) ) )
return img
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCAmelCase )
with open(__lowerCAmelCase , "rb" ) as stream:
UpperCamelCase__ : int = pkl.load(__lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = weights.pop("model" )
UpperCamelCase__ : Tuple = {}
for k, v in model.items():
UpperCamelCase__ : Optional[Any] = torch.from_numpy(__lowerCAmelCase )
if "running_var" in k:
UpperCamelCase__ : Dict = torch.tensor([0] )
UpperCamelCase__ : Dict = k.replace("running_var" , "num_batches_tracked" )
UpperCamelCase__ : Dict = zero
return new
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
print(f'{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="RGB" ) -> Union[str, Any]:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if os.path.isfile(__lowerCAmelCase ):
UpperCamelCase__ : Optional[Any] = cva.imread(__lowerCAmelCase )
else:
UpperCamelCase__ : Any = get_image_from_url(__lowerCAmelCase )
assert img is not None, f'could not connect to: {im}'
UpperCamelCase__ : Tuple = cva.cvtColor(__lowerCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
UpperCamelCase__ : Optional[Any] = img[:, :, ::-1]
return img
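# Note on the loader above: OpenCV decodes files to BGR, cvtColor converts the
# array to RGB, and the final [:, :, ::-1] reverses the channels once more when
# input_format == "RGB", so the default setting returns BGR channel order.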
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 196 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : Optional[Any] =False
class __a ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : int = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : Dict = torch.manual_seed(0 )
UpperCamelCase__ : str = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = generator.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = "cyberpunk 2077"
UpperCamelCase__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : Any = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : List[str] = "A painting of a squirrel eating a burger "
UpperCamelCase__ : List[Any] = torch.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Dict = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Any = pipe.image_variation(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="numpy" ).images
UpperCamelCase__ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : int = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 196 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
UpperCamelCase = dict(zip(__a , range(len(__a ) ) ) )
UpperCamelCase = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
UpperCamelCase = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_60_00,
"return_attention_mask": False,
"do_normalize": True,
}
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
# load decoder from hub
UpperCamelCase = "hf-internal-testing/ngram-beam-search-decoder"
def snake_case_ (self , **__a ) -> Optional[Any]:
UpperCamelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case_ (self , **__a ) -> Optional[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def snake_case_ (self , **__a ) -> Any:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def tearDown(self):
    shutil.rmtree(self.tmpdirname )
def snake_case_ (self ) -> Tuple:
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def snake_case_ (self ) -> Tuple:
UpperCamelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCamelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def snake_case_ (self ) -> List[str]:
UpperCamelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__a , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
UpperCamelCase = floats_list((3, 10_00) )
UpperCamelCase = feature_extractor(__a , return_tensors="np" )
UpperCamelCase = processor(__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ (self ) -> List[str]:
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
UpperCamelCase = "This is a test string"
UpperCamelCase = processor(text=__a )
UpperCamelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
    np.random.seed(seed)
    return np.random.rand(*shape)
def snake_case_ (self ) -> Dict:
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
UpperCamelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCamelCase = processor.decode(__a )
UpperCamelCase = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def snake_case_ (self , __a ) -> Optional[int]:
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
UpperCamelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCamelCase = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
UpperCamelCase = processor.batch_decode(__a , __a )
UpperCamelCase = list(__a )
with get_context("fork" ).Pool() as p:
UpperCamelCase = decoder.decode_beams_batch(__a , __a )
UpperCamelCase , UpperCamelCase , UpperCamelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def snake_case_ (self ) -> Any:
UpperCamelCase = self.get_feature_extractor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_decoder()
UpperCamelCase = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
UpperCamelCase = self._get_dummy_logits()
UpperCamelCase = 15
UpperCamelCase = -20.0
UpperCamelCase = -4.0
UpperCamelCase = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
UpperCamelCase = decoded_processor_out.text
UpperCamelCase = list(__a )
with get_context("fork" ).Pool() as pool:
UpperCamelCase = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
UpperCamelCase = [d[0][0] for d in decoded_decoder_out]
UpperCamelCase = [d[0][2] for d in decoded_decoder_out]
UpperCamelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1e-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1e-3 ) )
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)

    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_local_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_local_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_times, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_times, atol=0.01))
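

# --- Hedged aside (added for illustration; not part of the original test file). ---
# The offset -> seconds conversion used in the integration test generalizes to any
# CTC checkpoint: one logit frame covers inputs_to_logits_ratio / sampling_rate
# seconds of audio (320 / 16000 = 0.02 s for a Wav2Vec2 base model).
def _offset_to_seconds(offset: int, inputs_to_logits_ratio: int = 320, sampling_rate: int = 16000) -> float:
    return offset * inputs_to_logits_ratio / sampling_rate


assert _offset_to_seconds(150) == 3.0  # frame 150 starts 3 seconds into the audio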
| 153 |
"""simple docstring"""
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = set(_SCREAMING_SNAKE_CASE ), [start]
while stack:
UpperCamelCase = stack.pop()
explored.add(_SCREAMING_SNAKE_CASE )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_SCREAMING_SNAKE_CASE )
return explored
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
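

# --- Hedged aside (added for illustration; not part of the original snippet). ---
# For contrast with the "Differences from BFS" comments above: the breadth-first
# twin only swaps the LIFO stack for a FIFO queue.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = set(start), deque([start])
    while queue:
        v = queue.popleft()  # FIFO: pop the first element instead of the last
        explored.add(v)
        for adj in graph[v]:
            if adj not in explored:
                queue.append(adj)
    return explored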
| 153 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 293 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableUnCLIPImgaImgPipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 32
SCREAMING_SNAKE_CASE_ : Dict = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPImageProcessor(crop_size=32,size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_A,projection_dim=_A,num_hidden_layers=5,num_attention_heads=4,image_size=32,intermediate_size=37,patch_size=1,) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=_A )
SCREAMING_SNAKE_CASE_ : List[str] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=_A,projection_dim=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = UNetaDConditionModel(
sample_size=32,in_channels=4,out_channels=4,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),block_out_channels=(32, 64),attention_head_dim=(2, 4),class_embed_type="projection",projection_class_embeddings_input_dim=embedder_projection_dim * 2,cross_attention_dim=_A,layers_per_block=1,upcast_attention=_A,use_linear_projection=_A,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Tuple = DDIMScheduler(
beta_schedule="scaled_linear",beta_start=0.00085,beta_end=0.012,prediction_type="v_prediction",set_alpha_to_one=_A,steps_offset=1,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : str = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def __UpperCamelCase ( self : int,_A : str,_A : Optional[int]=0,_A : Union[str, Any]=True ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor((1, 3, 32, 32),rng=random.Random(_A ) ).to(_A )
if pil_image:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_image.clamp(0,1 )
SCREAMING_SNAKE_CASE_ : int = input_image.cpu().permute(0,2,3,1 ).float().numpy()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(_A )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Dict = StableUnCLIPImgaImgPipeline(**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_A )
inputs.update({"image_embeds": None} )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),reason="XFormers attention is only available with CUDA and `xformers` installed",)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turtle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
SCREAMING_SNAKE_CASE_ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(_A,"anime turtle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(
_A,"anime turtle",num_inference_steps=2,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
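

# --- Hedged aside (added for illustration; not part of the original tests). ---
# The memory-saving pattern the integration tests above rely on, in isolation
# (both methods are public diffusers pipeline API):
#
#     pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(..., torch_dtype=torch.float16)
#     pipe.enable_attention_slicing()        # trade speed for lower peak VRAM
#     pipe.enable_sequential_cpu_offload()   # keep only the active submodule on the GPU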
| 18 |
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
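

# --- Hedged aside (added for illustration; not part of the original script). ---
# sort_objects implements the ordering convention from the comments above --
# constants, then classes, then functions, each group alphabetized with
# underscores ignored:
#
#     sort_objects(["load_model", "CONFIG_NAME", "UNet2DModel", "_helper"])
#     # -> ["CONFIG_NAME", "UNet2DModel", "_helper", "load_model"]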
| 192 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
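

# --- Hedged aside (added for illustration; not part of the original shim). ---
# Per the deprecation message above, downstream code should switch to the
# package-root import:
#
#     from diffusers import FlaxStableDiffusionControlNetPipeline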
| 359 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
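

# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# This formatter is normally selected through the public `datasets` API rather than
# instantiated directly; the sketch assumes `Dataset.with_format`:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
#     ds[0]    # {"x": tensor([1., 2.]), "y": tensor(0)}  -- format_row + recursive_tensorize
#     ds["x"]  # a single stacked 2x2 tensor -- format_column + _consolidate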
| 238 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a : List[Any] = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =["""input_features""", """is_longer"""]
def __init__( self , lowerCAmelCase__=64 , lowerCAmelCase__=4_8000 , lowerCAmelCase__=480 , lowerCAmelCase__=10 , lowerCAmelCase__=1024 , lowerCAmelCase__=0.0 , lowerCAmelCase__=False , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 1_4000 , lowerCAmelCase__ = None , lowerCAmelCase__ = "fusion" , lowerCAmelCase__ = "repeatpad" , **lowerCAmelCase__ , ) -> str:
super().__init__(
feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : List[str] = top_db
a : str = truncation
a : List[Any] = padding
a : Dict = fft_window_size
a : Tuple = (fft_window_size >> 1) + 1
a : Optional[Any] = hop_length
a : List[str] = max_length_s
a : Union[str, Any] = max_length_s * sampling_rate
a : Optional[int] = sampling_rate
a : List[Any] = frequency_min
a : Dict = frequency_max
a : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase__ , min_frequency=lowerCAmelCase__ , max_frequency=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , norm=lowerCAmelCase__ , mel_scale="htk" , )
a : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase__ , min_frequency=lowerCAmelCase__ , max_frequency=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , norm="slaney" , mel_scale="slaney" , )
def __a ( self ) -> Dict[str, Any]:
a : Optional[Any] = copy.deepcopy(self.__dict__ )
a : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> np.ndarray:
a : Dict = spectrogram(
lowerCAmelCase__ , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCAmelCase__ , log_mel="dB" , )
return log_mel_spectrogram.T
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[int] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
a : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
a : int = [0]
# randomly choose index for each part
a : Optional[int] = np.random.choice(ranges[0] )
a : List[Any] = np.random.choice(ranges[1] )
a : List[str] = np.random.choice(ranges[2] )
a : Union[str, Any] = mel[idx_front : idx_front + chunk_frames, :]
a : List[str] = mel[idx_middle : idx_middle + chunk_frames, :]
a : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
a : Any = torch.tensor(mel[None, None, :] )
a : Any = torch.nn.functional.interpolate(
lowerCAmelCase__ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=lowerCAmelCase__ )
a : Optional[int] = mel_shrink[0][0].numpy()
a : List[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
a : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
a : Optional[int] = len(lowerCAmelCase__ ) - max_length
a : List[str] = np.random.randint(0 , overflow + 1 )
a : int = waveform[idx : idx + max_length]
a : int = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
a : Optional[Any] = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters )
a : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
a : Union[str, Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
a : Optional[int] = np.stack([mel, mel, mel, mel] , axis=0 )
a : str = False
else:
a : Optional[int] = self._random_mel_fusion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : Tuple = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
a : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
a : Optional[int] = int(max_length / len(lowerCAmelCase__ ) )
a : List[str] = np.stack(np.tile(lowerCAmelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
a : Union[str, Any] = int(max_length / len(lowerCAmelCase__ ) )
a : Optional[Any] = np.stack(np.tile(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = np.pad(lowerCAmelCase__ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
a : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters )
a : int = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
a : str = self._np_extract_fbank_features(lowerCAmelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
a : Union[str, Any] = truncation if truncation is not None else self.truncation
a : Dict = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
a : List[Any] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
a : int = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a : List[str] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
a : int = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a : int = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
a : List[Any] = [
self._get_input_mel(lowerCAmelCase__ , max_length if max_length else self.nb_max_samples , lowerCAmelCase__ , lowerCAmelCase__ )
for waveform in raw_speech
]
a : int = []
a : int = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
a : Tuple = np.random.randint(0 , len(lowerCAmelCase__ ) )
a : Union[str, Any] = True
if isinstance(input_mel[0] , lowerCAmelCase__ ):
a : Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
a : Optional[int] = [[longer] for longer in is_longer]
a : Tuple = {"input_features": input_mel, "is_longer": is_longer}
a : Any = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
a : str = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
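

# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# Assuming the class above corresponds to transformers' ClapFeatureExtractor, feeding
# it a mono waveform with the defaults declared in __init__ (48 kHz, 10 s max length,
# 64 mel bins, truncation="fusion") would look like:
#
#     import numpy as np
#     extractor = ClapFeatureExtractor()
#     waveform = np.random.randn(48_000 * 3)  # 3 s of fake mono audio
#     features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#     features["input_features"]  # shape (1, 4, n_frames, 64): 4 channels in fusion mode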
| 105 |
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 215 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )

            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special (eos, pad, ...) else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
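

# --- Hedged usage sketch (added for illustration; not part of the original file). ---
# build_inputs_with_special_tokens above only appends </s>; Pegasus prepends nothing.
#
#     tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     ids = tok("To be, or not to be").input_ids
#     assert ids[-1] == tok.eos_token_id  # single EOS appended, no BOS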
| 364 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor) for a 1d array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class _lowercase ( nn.Module ):
lowercase = 3_2
lowercase = jnp.floataa
@nn.compact
def __call__( self : str , snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(snake_case )
UpperCamelCase_ : int = nn.silu(snake_case )
UpperCamelCase_ : Optional[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(snake_case )
return temb
class _lowercase ( nn.Module ):
lowercase = 3_2
lowercase = False
lowercase = 1
@nn.compact
def __call__( self : int , snake_case : Any ) -> str:
"""simple docstring"""
return get_sinusoidal_embeddings(
snake_case , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 50 | 0 |
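A minimal numpy sketch of the sinusoidal timestep embedding computed by the Flax helper above, simplified by assuming min_timescale=1 and freq_shift=0; the function and variable names here are illustrative only:

import math
import numpy as np

def sinusoidal_embeddings(timesteps, embedding_dim, max_timescale=1.0e4):
    half_dim = embedding_dim // 2
    # log-spaced inverse frequencies, as in the Flax helper above
    log_increment = math.log(max_timescale) / half_dim
    inv_freq = np.exp(np.arange(half_dim) * -log_increment)
    angles = timesteps[:, None].astype(np.float32) * inv_freq[None, :]
    # concatenate sine and cosine halves along the feature axis
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

emb = sinusoidal_embeddings(np.array([0, 10, 500]), embedding_dim=8)
print(emb.shape)  # (3, 8)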
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : CLIPSegForImageSegmentation , UpperCAmelCase_ : CLIPSegProcessor , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , ) ->List[Any]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , "steps_offset") and scheduler.config.steps_offset != 1:
lowerCamelCase__: List[str] =(
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowerCamelCase__: Tuple =dict(scheduler.config)
lowerCamelCase__: List[str] =1
lowerCamelCase__: Optional[int] =FrozenDict(UpperCAmelCase_)
if hasattr(scheduler.config , "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
lowerCamelCase__: Tuple =(
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =dict(scheduler.config)
lowerCamelCase__: str =True
lowerCamelCase__: Optional[Any] =FrozenDict(UpperCAmelCase_)
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
self.register_modules(
segmentation_model=UpperCAmelCase_ , segmentation_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Union[str, int]] = "auto") ->List[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__: Dict =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
lowerCamelCase__: Dict =torch.device("cuda")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
if self.device != torch.device("meta") or not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
def __call__(self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : str , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt").to(self.device)
lowerCamelCase__: Dict =self.segmentation_model(**UpperCAmelCase_)
lowerCamelCase__: Tuple =torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
lowerCamelCase__: List[Any] =self.numpy_to_pil(UpperCAmelCase_)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
lowerCamelCase__: int =StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , )
| 10 |
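The mask-generation step in the pipeline above boils down to a sigmoid over the CLIPSeg logits followed by a resize to the input image size; a self-contained sketch with random logits standing in for real model output:

import numpy as np
from PIL import Image

logits = np.random.randn(352, 352).astype(np.float32)  # stand-in for CLIPSeg logits
probs = 1.0 / (1.0 + np.exp(-logits))  # equivalent of torch.sigmoid
# scale to 8-bit grayscale and resize to the (hypothetical) source image size
mask = Image.fromarray((probs * 255).astype(np.uint8)).resize((512, 512))
print(mask.size, mask.mode)  # (512, 512) L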
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['DeiTFeatureExtractor']
__a = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 30 | 0 |
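The lazy-module pattern above defers heavy imports until an attribute is first accessed; a minimal, generic sketch of the same idea (this class and its mapping are hypothetical, not the transformers implementation):

import importlib

class LazyNamespace:
    """Defer importing submodules until an attribute is first accessed."""

    def __init__(self, import_structure):
        self._import_structure = import_structure  # attribute name -> module path

    def __getattr__(self, name):
        try:
            module_path = self._import_structure[name]
        except KeyError:
            raise AttributeError(name) from None
        value = getattr(importlib.import_module(module_path), name)
        setattr(self, name, value)  # cache so __getattr__ is not hit again
        return value

ns = LazyNamespace({"sqrt": "math", "dumps": "json"})
print(ns.sqrt(9.0))  # "math" is imported only at this point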
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self):
        # 5x5 Polybius square shared by encode and decode ("j" is folded into "i")
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        """Return the 1-indexed (row, column) pair of ``letter`` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        """Return the letter sitting at the 1-indexed (row, column) position."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message):
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # step 1: write the row indices of the message above the column indices
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # step 2: read the digits row by row and regroup them into pairs
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            encoded_message = encoded_message + self.numbers_to_letter(index1, index2)

        return encoded_message

    def decode(self, message):
        message = message.lower()
        message = message.replace(" ", "")

        # undo step 2: expand each ciphertext letter back into its digit pair
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # undo step 1: split the digit stream back into row/column halves
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            decoded_message = decoded_message + self.numbers_to_letter(index1, index2)

        return decoded_message
| 365 |
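A quick round-trip check for the cleaned-up Bifid cipher class above (the plaintext loses its spaces, and any "j" becomes "i"):

cipher = BifidCipher()
secret = cipher.encode("defend the east wall")
print(secret)
print(cipher.decode(secret))  # -> defendtheeastwall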
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowercase : Tuple = logging.get_logger(__name__)
class __snake_case ( lowerCAmelCase ):
_a : Optional[Any]= ["input_features", "attention_mask"]
def __init__( self ,snake_case=80 ,snake_case=16000 ,snake_case=0.0 ,snake_case=10 ,snake_case=25 ,snake_case="hamming_window" ,snake_case=32_768.0 ,snake_case=0.97 ,snake_case=1.0 ,snake_case=True ,snake_case=True ,snake_case=False ,**snake_case ,):
'''simple docstring'''
super().__init__(feature_size=snake_case ,sampling_rate=snake_case ,padding_value=snake_case ,**snake_case )
lowercase : Optional[Any] = feature_size
lowercase : List[Any] = sampling_rate
lowercase : int = padding_value
lowercase : Dict = hop_length
lowercase : List[str] = win_length
lowercase : List[Any] = frame_signal_scale
lowercase : List[Any] = preemphasis_coeff
lowercase : str = mel_floor
lowercase : int = normalize_means
lowercase : List[Any] = normalize_vars
lowercase : List[Any] = win_function
lowercase : int = return_attention_mask
lowercase : Any = win_length * sampling_rate // 1000
lowercase : Tuple = hop_length * sampling_rate // 1000
lowercase : Tuple = optimal_fft_length(self.sample_size )
lowercase : Dict = (self.n_fft // 2) + 1
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.win_function == "hamming_window":
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=snake_case )
else:
lowercase : Optional[Any] = window_function(window_length=self.sample_size ,name=self.win_function )
lowercase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
lowercase : Dict = spectrogram(
one_waveform * self.frame_signal_scale ,window=snake_case ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=snake_case ,preemphasis=self.preemphasis_coeff ,mel_filters=snake_case ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
return msfc_features.T
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if self.normalize_means:
lowercase : List[Any] = x[:input_length].mean(axis=0 )
lowercase : Dict = np.subtract(snake_case ,snake_case )
if self.normalize_vars:
lowercase : List[Any] = x[:input_length].std(axis=0 )
lowercase : List[Any] = np.divide(snake_case ,snake_case )
if input_length < x.shape[0]:
lowercase : Any = padding_value
# make sure array is in float32
lowercase : Tuple = x.astype(np.floataa )
return x
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ):
'''simple docstring'''
lowercase : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(snake_case ,snake_case ,self.padding_value ) for x, n in zip(snake_case ,snake_case )]
def __call__( self ,snake_case ,snake_case = False ,snake_case = None ,snake_case = False ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,**snake_case ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowercase : List[Any] = isinstance(snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowercase : str = is_batched_numpy or (
isinstance(snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray(snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case ,np.ndarray ):
lowercase : int = np.asarray(snake_case ,dtype=np.floataa )
elif isinstance(snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : Dict = [raw_speech]
# extract fbank features
lowercase : Tuple = [self._extract_mfsc_features(snake_case ) for one_waveform in raw_speech]
# convert into correct format for padding
lowercase : Union[str, Any] = BatchFeature({"""input_features""": features} )
lowercase : Optional[int] = self.pad(
snake_case ,padding=snake_case ,max_length=snake_case ,truncation=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,**snake_case ,)
# make sure list is in array format
lowercase : Tuple = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] ,snake_case ):
lowercase : List[Any] = [np.asarray(snake_case ,dtype=np.floataa ) for feature in input_features]
lowercase : int = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowercase : Any = [np.asarray(snake_case ,dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
lowercase : List[str] = (
np.array(snake_case ,dtype=np.intaa )
if self._get_padding_strategies(snake_case ,max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
lowercase : List[str] = self.normalize(
padded_inputs["""input_features"""] ,attention_mask=snake_case )
if return_tensors is not None:
lowercase : str = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
| 285 | 0 |
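The per-utterance normalization in the feature extractor above reduces to mean and variance normalization over the unpadded frames, with padded frames reset afterwards; a small numpy sketch (a tiny epsilon is added here for numerical safety, which the extractor above does not do):

import numpy as np

def cmvn(features, input_length, padding_value=0.0):
    # normalize over the valid (unpadded) frames only
    x = features.copy()
    mean = x[:input_length].mean(axis=0)
    x = x - mean
    std = x[:input_length].std(axis=0)
    x = x / (std + 1e-10)
    x[input_length:] = padding_value  # reset padded frames
    return x.astype(np.float32)

feats = np.random.randn(12, 4)
print(cmvn(feats, input_length=10).shape)  # (12, 4)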
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
A__: Dict = logging.getLogger(__name__)
class A__ :
def __init__( self :int ) -> List[str]:
'''simple docstring'''
_a : Dict =False
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Any ) -> Union[str, Any]:
'''simple docstring'''
if not self.initialized:
_a : Optional[Any] =RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=SCREAMING_SNAKE_CASE , generator_tokenizer=SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , init_retrieval=SCREAMING_SNAKE_CASE , )
_a : Optional[Any] =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self.retriever.index.init_index()
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :int ) -> List[Any]:
'''simple docstring'''
_a , _a : Optional[Any] =self.retriever._main_retrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return doc_ids, retrieved_doc_embeds
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int]=None ) -> str:
'''simple docstring'''
if index is not None and index.is_initialized() and len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=SCREAMING_SNAKE_CASE , generator_tokenizer=SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , init_retrieval=SCREAMING_SNAKE_CASE , )
_a : int =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for worker in self.retrieval_workers
] )
def __UpperCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
_a : List[str] =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
_a , _a : Dict =ray.get(random_worker.retrieve.remote(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
else:
_a , _a : Union[str, Any] =self._main_retrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE )
@classmethod
def __UpperCAmelCase ( cls :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Tuple=None , **SCREAMING_SNAKE_CASE :Any ) -> List[str]:
'''simple docstring'''
return super(SCREAMING_SNAKE_CASE , cls ).get_tokenizers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@classmethod
def __UpperCAmelCase ( cls :Any , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str=None , **SCREAMING_SNAKE_CASE :List[Any] ) -> Dict:
'''simple docstring'''
_a : int =kwargs.pop("""config""" , SCREAMING_SNAKE_CASE ) or RagConfig.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : Optional[int] =RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
_a : Dict =rag_tokenizer.question_encoder
_a : Tuple =rag_tokenizer.generator
if indexed_dataset is not None:
_a : Dict ="""custom"""
_a : List[str] =CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE )
else:
_a : Any =cls._build_index(SCREAMING_SNAKE_CASE )
return cls(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=SCREAMING_SNAKE_CASE , generator_tokenizer=SCREAMING_SNAKE_CASE , retrieval_workers=SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
| 276 |
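The retrieve path above picks a random Ray actor from a pool and awaits its result; a minimal runnable sketch of that pattern (requires ray installed; the actor and method names here are illustrative, not the real retriever API):

import random

import ray

ray.init(num_cpus=2)

@ray.remote
class RetrievalWorker:
    """Toy stand-in for a retrieval actor."""

    def retrieve(self, question):
        return f"top documents for {question!r}"

workers = [RetrievalWorker.remote() for _ in range(2)]
# pick a random actor, mirroring the random-worker selection above
worker = workers[random.randint(0, len(workers) - 1)]
print(ray.get(worker.retrieve.remote("what is retrieval augmentation?")))
ray.shutdown()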
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
A__: Dict = None
A__: Tuple = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
A__: Any = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int]=1 ,_UpperCAmelCase : List[str]=256 ) -> Dict:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ) -> List[str]:
with open(_UpperCAmelCase ,"""r""" ) as f:
return json.load(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ) -> Tuple:
with open(_UpperCAmelCase ,"""w""" ) as f:
json.dump(_UpperCAmelCase ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any]=True ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
_a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp""" )
os.makedirs(_UpperCAmelCase ,exist_ok=_UpperCAmelCase )
_a : int =read_json(os.path.join(_UpperCAmelCase ,"""params.json""" ) )
_a : int =NUM_SHARDS[model_size]
_a : Dict =params["""n_layers"""]
_a : Union[str, Any] =params["""n_heads"""]
_a : List[str] =n_heads // num_shards
_a : int =params["""dim"""]
_a : Union[str, Any] =dim // n_heads
_a : int =1_0_0_0_0.0
_a : str =1.0 / (base ** (torch.arange(0 ,_UpperCAmelCase ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_a : str =params["""n_kv_heads"""] # for GQA / MQA
_a : Optional[Any] =n_heads_per_shard // num_key_value_heads
_a : Optional[int] =dim // num_key_value_heads
else: # compatibility with other checkpoints
_a : str =n_heads
_a : Any =n_heads_per_shard
_a : str =dim
# permute for sliced rotary
def permute(_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int]=n_heads ,_UpperCAmelCase : Optional[int]=dim ,_UpperCAmelCase : List[str]=dim ):
return w.view(_UpperCAmelCase ,dima // n_heads // 2 ,2 ,_UpperCAmelCase ).transpose(1 ,2 ).reshape(_UpperCAmelCase ,_UpperCAmelCase )
print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_a : Any =torch.load(os.path.join(_UpperCAmelCase ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
else:
# Sharded
_a : List[Any] =[
torch.load(os.path.join(_UpperCAmelCase ,F"consolidated.{i:02d}.pth" ) ,map_location="""cpu""" )
for i in range(_UpperCAmelCase )
]
_a : Any =0
_a : Optional[int] ={"""weight_map""": {}}
for layer_i in range(_UpperCAmelCase ):
_a : List[str] =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_a : List[str] ={
F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wq.weight"] ),
F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[F"layers.{layer_i}.attention.wk.weight"] ),
F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_a : Tuple ={
F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
F"layers.{layer_i}.attention_norm.weight"
].clone(),
F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
F"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
_a : str =permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Tuple =permute(
torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase ) ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)
_a : Any =torch.cat(
[
loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
for i in range(_UpperCAmelCase )
] ,dim=0 ,).reshape(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[str] =torch.cat(
[loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
_a : Union[str, Any] =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
_a : Tuple =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(_UpperCAmelCase )] ,dim=1 )
_a : Union[str, Any] =torch.cat(
[loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(_UpperCAmelCase )] ,dim=0 )
_a : str =inv_freq
for k, v in state_dict.items():
_a : Any =filename
param_count += v.numel()
torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Union[str, Any] =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
_a : List[str] ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_a : int ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_UpperCAmelCase )] ,dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_UpperCAmelCase )] ,dim=0 ),
}
for k, v in state_dict.items():
_a : Dict =filename
param_count += v.numel()
torch.save(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )
# Write configs
_a : Tuple ={"""total_size""": param_count * 2}
write_json(_UpperCAmelCase ,os.path.join(_UpperCAmelCase ,"""pytorch_model.bin.index.json""" ) )
_a : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_a : int =params["""multiple_of"""] if """multiple_of""" in params else 256
_a : List[Any] =LlamaConfig(
hidden_size=_UpperCAmelCase ,intermediate_size=compute_intermediate_size(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_UpperCAmelCase ,)
config.save_pretrained(_UpperCAmelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_a : Any =LlamaForCausalLM.from_pretrained(_UpperCAmelCase ,torch_dtype=torch.floataa ,low_cpu_mem_usage=_UpperCAmelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_UpperCAmelCase ,safe_serialization=_UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> Optional[Any]:
# Initialize the tokenizer based on the `spm` model
_a : List[str] =LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
_a : List[Any] =tokenizer_class(_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : List[str] =argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
parser.add_argument(
"""--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
parser.add_argument(
"""--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
parser.add_argument("""--safe_serialization""" ,type=_UpperCAmelCase ,help="""Whether or not to save using `safetensors`.""" )
_a : Optional[Any] =parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
_a : List[Any] =os.path.join(args.input_dir ,"""tokenizer.model""" )
write_tokenizer(args.output_dir ,_UpperCAmelCase )
if __name__ == "__main__":
main()
| 276 | 1 |
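A small torch sketch of the permute helper defined in the conversion script above, which interleaves the two rotary halves of each attention head while preserving the overall weight shape:

import torch

def permute(w, n_heads, dim1, dim2):
    # (dim1, dim2) -> (n_heads, dim1 // n_heads // 2, 2, dim2) -> swap -> flatten
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

w = torch.arange(8 * 8, dtype=torch.float32).reshape(8, 8)
out = permute(w, n_heads=2, dim1=8, dim2=8)
print(out.shape)  # torch.Size([8, 8]), same shape, rows reordered per head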
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __UpperCamelCase :
def __init__( self , __a = "cpu" , __a = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__a : List[str] = device
__a : Tuple = CLIPTokenizerFast.from_pretrained(__a )
__a : int = [0.48145466, 0.4578275, 0.40821073]
__a : str = [0.26862954, 0.26130258, 0.27577711]
__a : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__a : int = torchvision.transforms.Resize(224 )
__a : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Union[str, Any] = self.resize(__a )
__a : List[Any] = self.center_crop(__a )
__a : Optional[Any] = self.normalize(__a )
return images
def __call__( self , __a=None , __a=None , **__a ):
'''simple docstring'''
__a : Optional[int] = self.tokenizer(text=__a , **__a )
__a : Dict = self.preprocess_img(__a )
__a : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __UpperCamelCase ( nn.Module ):
def __init__( self , __a=10 , __a=0.01 , __a=None , __a=None , __a=None , __a=None , __a=None , __a=None , __a=False , __a=True , __a="image" , __a=True , __a=False , __a=False , __a=False , ):
'''simple docstring'''
super().__init__()
__a : List[str] = None
__a : List[Any] = device if device else get_device()
if vqgan:
__a : int = vqgan
else:
__a : Dict = load_vqgan(self.device , conf_path=__a , ckpt_path=__a )
self.vqgan.eval()
if clip:
__a : Optional[Any] = clip
else:
__a : Dict = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
__a : Optional[Any] = ProcessorGradientFlow(device=self.device )
__a : str = iterations
__a : int = lr
__a : Optional[int] = log
__a : List[str] = make_grid
__a : Optional[int] = return_val
__a : int = quantize
__a : Any = self.vqgan.decoder.z_shape
def __UpperCAmelCase ( self , __a=None , __a=None , __a=5 , __a=True ):
'''simple docstring'''
__a : Union[str, Any] = []
if output_path is None:
__a : str = './animation.gif'
if input_path is None:
__a : List[str] = self.save_path
__a : Dict = sorted(glob(input_path + '/*' ) )
if not len(__a ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(__a ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
__a : Any = total_duration / len(__a )
__a : Optional[int] = [frame_duration] * len(__a )
if extend_frames:
__a : Tuple = 1.5
__a : Any = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(__a ) )
imageio.mimsave(__a , __a , duration=__a )
print(f"""gif saved to {output_path}""" )
def __UpperCAmelCase ( self , __a=None , __a=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
__a : Optional[int] = preprocess(Image.open(__a ) , target_image_size=256 ).to(self.device )
__a : Optional[Any] = preprocess_vqgan(__a )
__a , *__a : Optional[int] = self.vqgan.encode(__a )
return z
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : str = self.latent.detach().requires_grad_()
__a : str = base_latent + transform_vector
if self.quantize:
__a , *__a : Optional[Any] = self.vqgan.quantize(__a )
else:
__a : Tuple = trans_latent
return self.vqgan.decode(__a )
def __UpperCAmelCase ( self , __a , __a , __a=None ):
'''simple docstring'''
__a : Union[str, Any] = self.clip_preprocessor(text=__a , images=__a , return_tensors='pt' , padding=__a )
__a : Dict = self.clip(**__a )
__a : List[str] = clip_outputs.logits_per_image
if weights is not None:
__a : Tuple = similarity_logits * weights
return similarity_logits.sum()
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = self._get_clip_similarity(pos_prompts['prompts'] , __a , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
__a : List[Any] = self._get_clip_similarity(neg_prompts['prompts'] , __a , weights=neg_prompts['weights'] )
else:
__a : str = torch.tensor([1] , device=self.device )
__a : Optional[int] = -torch.log(__a ) + torch.log(__a )
return loss
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Dict = torch.randn_like(self.latent , requires_grad=__a , device=self.device )
__a : List[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__a : str = self._add_vector(__a )
__a : int = loop_post_process(__a )
__a : Optional[Any] = self._get_CLIP_loss(__a , __a , __a )
print('CLIP loss' , __a )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=__a )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
wandb.init(reinit=__a , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
__a : List[Any] = Image.open(__a )
__a : int = image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(__a ) )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if not prompts:
return []
__a : Tuple = []
__a : Union[str, Any] = []
if isinstance(__a , __a ):
__a : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(__a , (tuple, list) ):
__a : List[str] = prompt[0]
__a : Any = float(prompt[1] )
elif ":" in prompt:
__a , __a : Optional[Any] = prompt.split(':' )
__a : Optional[Any] = float(__a )
else:
__a : Union[str, Any] = prompt
__a : Optional[int] = 1.0
processed_prompts.append(__a )
weights.append(__a )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__a , device=self.device ),
}
def __UpperCAmelCase ( self , __a , __a=None , __a=None , __a=True , __a=False , __a=True , __a=True , __a=None , ):
'''simple docstring'''
if image_path:
__a : Any = self._get_latent(__a )
else:
__a : Optional[Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__a , __a , __a )
assert pos_prompts, "You must provide at least one positive prompt."
__a : Optional[int] = self.process_prompts(__a )
__a : Optional[int] = self.process_prompts(__a )
if save_final and save_path is None:
__a : int = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(__a ):
os.makedirs(__a )
else:
__a : Tuple = save_path + '_' + get_timestamp()
os.makedirs(__a )
__a : Optional[int] = save_path
__a : Union[str, Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(__a ) )
__a : List[Any] = loop_post_process(__a )
for iter, transformed_img in enumerate(self._optimize_CLIP(__a , __a , __a ) ):
if show_intermediate:
show_pil(__a )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'Image': wandb.Image(__a )} )
if show_final:
show_pil(__a )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 294 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = None
A_ = None
A_ = None
A_ = None
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=1 , __a=0 , __a=2 , __a=512 , __a="cls" , __a=False , __a=True , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__a : Any = project_dim
__a : Optional[Any] = pooler_fn
__a : int = learn_encoder
__a : str = use_attention_mask
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = [r"pooler", r"logit_scale"]
A_ = [r"position_ids", r"predictions.decoder.bias"]
A_ = "roberta"
A_ = RobertaSeriesConfig
def __init__( self , __a ):
'''simple docstring'''
super().__init__(__a )
__a : Optional[Any] = XLMRobertaModel(__a )
__a : str = nn.Linear(config.hidden_size , config.project_dim )
__a : Optional[int] = getattr(__a , 'has_pre_transformation' , __a )
if self.has_pre_transformation:
__a : int = nn.Linear(config.hidden_size , config.project_dim )
__a : List[str] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __UpperCAmelCase ( self , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , ):
'''simple docstring'''
__a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__a : Tuple = self.base_model(
input_ids=__a , attention_mask=__a , token_type_ids=__a , position_ids=__a , head_mask=__a , inputs_embeds=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_attentions=__a , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=__a , )
if self.has_pre_transformation:
__a : Optional[Any] = outputs['hidden_states'][-2]
__a : Optional[int] = self.pre_LN(__a )
__a : Union[str, Any] = self.transformation_pre(__a )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__a : Optional[Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=__a , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 294 | 1 |
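The projection step in the model above is just a linear map applied to the encoder's last hidden state; a minimal torch sketch with hypothetical sizes:

import torch
from torch import nn

hidden_size, project_dim = 16, 8
projection = nn.Linear(hidden_size, project_dim)  # mirrors the transformation layer above
last_hidden_state = torch.randn(2, 5, hidden_size)  # (batch, seq_len, hidden)
projection_state = projection(last_hidden_state)
print(projection_state.shape)  # torch.Size([2, 5, 8])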
"""
Parallel odd-even transposition sort: each list element lives in its own
process and repeatedly compare-exchanges with its neighbors over pipes.
"""
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 267 |
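A compact single-process reference implementation of odd-even transposition sort, handy for sanity-checking the parallel version above:

def odd_even_transposition_serial(arr):
    arr = list(arr)
    n = len(arr)
    # after n alternating odd/even phases the list is guaranteed sorted
    for phase in range(n):
        start = phase % 2
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

print(odd_even_transposition_serial([5, 3, 8, 1, 9, 2]))  # [1, 2, 3, 5, 8, 9]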
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger()
def __A ( a_ :int , a_ :str , a_ :LevitConfig , a_ :Path , a_ :bool = True) -> Union[str, Any]:
print(F"""Converting {name}...""")
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__a : Optional[int] = timm.create_model('''levit_128s''' , pretrained=a_)
else:
__a : List[Any] = timm.create_model('''levit_128''' , pretrained=a_)
if hidden_sizes == 1_92:
__a : List[Any] = timm.create_model('''levit_192''' , pretrained=a_)
if hidden_sizes == 2_56:
__a : Any = timm.create_model('''levit_256''' , pretrained=a_)
if hidden_sizes == 3_84:
__a : Optional[int] = timm.create_model('''levit_384''' , pretrained=a_)
from_model.eval()
__a : Dict = LevitForImageClassificationWithTeacher(a_).eval()
__a : Optional[int] = OrderedDict()
__a : Tuple = from_model.state_dict()
__a : Dict = list(from_model.state_dict().keys())
__a : str = list(our_model.state_dict().keys())
print(len(a_) , len(a_))
for i in range(len(a_)):
__a : int = weights[og_keys[i]]
our_model.load_state_dict(a_)
__a : Union[str, Any] = torch.randn((2, 3, 2_24, 2_24))
__a : Union[str, Any] = from_model(a_)
__a : Optional[int] = our_model(a_).logits
assert torch.allclose(a_ , a_), "The model logits don't match the original one."
__a : List[Any] = name
print(a_)
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name)
__a : Tuple = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name)
print(F"""Pushed {checkpoint_name}""")
def __A ( a_ :Path , a_ :str = None , a_ :bool = True) -> Optional[Any]:
__a : List[Any] = '''imagenet-1k-id2label.json'''
__a : Tuple = 10_00
__a : List[str] = (1, num_labels)
__a : Union[str, Any] = '''huggingface/label-files'''
__a : Dict = num_labels
__a : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
__a : str = {int(a_): v for k, v in idalabel.items()}
__a : int = idalabel
__a : List[str] = {v: k for k, v in idalabel.items()}
__a : Optional[int] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_)
__a : Optional[int] = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
__a : int = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , a_ , names_to_config[model_name] , a_ , a_)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , a_ , a_ , a_ , a_)
return config, expected_shape
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
A = parser.parse_args()
A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 160 | 0 |
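The weight transfer in the conversion script above relies on zipping the source and target state-dict keys positionally, which assumes both dicts enumerate parameters in the same order; a toy sketch of that remapping (all key names here are hypothetical):

from collections import OrderedDict

import torch

# source checkpoint with timm-style names
from_state_dict = OrderedDict(
    [("stem.conv.weight", torch.ones(2)), ("stem.conv.bias", torch.zeros(2))]
)
# target model expects different names in the *same order*
hf_keys = ["patch_embeddings.conv.weight", "patch_embeddings.conv.bias"]

remapped = OrderedDict(
    (hf_key, from_state_dict[timm_key])
    for timm_key, hf_key in zip(from_state_dict.keys(), hf_keys)
)
print(list(remapped))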
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
a : int = logging.get_logger(__name__)
logging.set_verbosity_info()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
snake_case_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCAmelCase )
snake_case_ = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCAmelCase, output_loading_info=__UpperCAmelCase )
else:
snake_case_ = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCAmelCase )
snake_case_ = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCAmelCase, output_loading_info=__UpperCAmelCase )
snake_case_ = ['''key_proj''', '''value_proj''', '''query_proj''']
snake_case_ = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
snake_case_ = key.split('''.''' )
if attributes[0] == "lm_head":
snake_case_ = prophet
snake_case_ = prophet_old
else:
snake_case_ = prophet.prophetnet
snake_case_ = prophet_old.model
snake_case_ = False
for attribute in attributes:
if attribute in mapping:
snake_case_ = mapping[attribute]
if not hasattr(__UpperCAmelCase, __UpperCAmelCase ) and len(__UpperCAmelCase ) > 0:
snake_case_ = attribute
elif hasattr(__UpperCAmelCase, __UpperCAmelCase ):
snake_case_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
snake_case_ = old_model.weight
logger.info(F"{attribute} is initialized." )
snake_case_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
snake_case_ = old_model.bias
logger.info(F"{attribute} is initialized" )
snake_case_ = True
break
elif attribute in special_keys and hasattr(__UpperCAmelCase, '''in_proj_weight''' ):
snake_case_ = old_model.in_proj_weight.shape[0] // 3
snake_case_ = getattr(__UpperCAmelCase, __UpperCAmelCase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
snake_case_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
snake_case_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
snake_case_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
snake_case_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
snake_case_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
snake_case_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
snake_case_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
snake_case_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
snake_case_ = True
break
if attribute.isdigit():
snake_case_ = model[int(__UpperCAmelCase )]
snake_case_ = old_model[int(__UpperCAmelCase )]
else:
snake_case_ = getattr(__UpperCAmelCase, __UpperCAmelCase )
if old_attribute == "":
snake_case_ = old_model
else:
if not hasattr(__UpperCAmelCase, __UpperCAmelCase ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
snake_case_ = getattr(__UpperCAmelCase, __UpperCAmelCase )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a : Dict = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 366 |
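Splitting a fused in_proj weight into query/key/value thirds, as done for the special keys in the conversion above; a minimal torch sketch:

import torch

hidden = 6
in_proj_weight = torch.randn(3 * hidden, hidden)  # fused q/k/v rows stacked along dim 0
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[2 * hidden :, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)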
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a ( _lowerCamelCase , unittest.TestCase ):
snake_case_ = FlaxAutoencoderKL
@property
def A_ ( self : List[Any] ):
snake_case_ = 4
snake_case_ = 3
snake_case_ = (32, 32)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.uniform(lowercase_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 72 | 0 |
__A ='''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 19 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory ( args : Namespace ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class __lowerCAmelCase ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__a = self._tf_checkpoint
__a = ''''''
else:
__a = self._tf_checkpoint
__a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
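# Example invocation (illustrative paths; the flags are exactly the ones registered above):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin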
| 45 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
def make_batched( videos )-> Any:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}" )
class __magic_name__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , )-> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , )-> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , )-> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , )-> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , )-> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , )-> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
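# Minimal usage sketch (illustrative; assumes `frames` is a list of PIL images for one clip,
# and relies on BaseImageProcessor.__call__ delegating to `preprocess`):
#   processor = __magic_name__()
#   batch = processor(frames, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, num_frames, 3, 224, 224) with the defaults above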
| 363 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict )-> Dict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder" , "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder" , "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"patch_embed{idx}" , f"patch_embeddings.{int(idx)-1}" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f"layer_norm{idx}" , f"layer_norm.{int(idx)-1}" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"block{idx}" , f"block.{int(idx)-1}" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"linear_c{idx}" , f"linear_c.{int(idx)-1}" )
        if "bot_conv" in key:
            key = key.replace("bot_conv" , "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1" , "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2" , "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1" , "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2" , "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3" , "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv" , "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth" , "head.head" )
        new_state_dict[key] = value
    return new_state_dict
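# Worked example (illustrative): the replacements above turn a checkpoint key like
#   "module.encoder.patch_embed1.proj.weight"
# into
#   "glpn.encoder.patch_embeddings.0.proj.weight"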
def read_in_k_v( state_dict , config )-> Union[str, Any]:
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight" )
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
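# Illustrative: a fused kv matrix of shape (2 * hidden_size, hidden_size) is split row-wise
# into the key block kv_weight[: hidden_size, :] and the value block kv_weight[hidden_size :, :],
# mirroring the slices above.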
def prepare_img( )-> Optional[Any]:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None )-> int:
    """simple docstring"""
    config = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f"Unknown model name: {model_name}" )
        expected_shape = torch.Size([1, 4_8_0, 6_4_0] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Any = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 60 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 136 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"
    def __init__( self , feature_extractor , tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
            audio = kwargs.pop("""raw_speech""")
        else:
            audio = kwargs.pop("""audio""" , None)
        sampling_rate = kwargs.pop("""sampling_rate""" , None)
        text = kwargs.pop("""text""" , None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        if text is not None:
            encodings = self.tokenizer(text , **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
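# Minimal usage sketch (illustrative; the checkpoint name is an assumption, and `waveform`
# stands in for a 16 kHz audio array):
#   processor = SCREAMING_SNAKE_CASE__.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="a transcript").input_ids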
| 136 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id , token=None ) -> Optional[int]:
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
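# Pagination note: the jobs endpoint returns at most 100 entries per call, so with e.g.
# total_count == 250 the loop above fetches math.ceil((250 - 100) / 100) == 2 extra
# pages (&page=2 and &page=3).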
def get_artifacts_links( workflow_run_id , token=None ) -> Dict:
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
    return {}
def download_artifact( artifact_name , artifact_url , output_dir , token ) -> int:
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f"""{artifact_name}.zip""" )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact( artifact_zip_path , job_links=None ) -> List[Any]:
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` """
            f"""and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors( artifact_dir , job_links=None ) -> Union[str, Any]:
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error( logs , error_filter=None ) -> Dict:
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
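# Shape of the reduced mapping (hypothetical values): each key is an error string, mapped to
# its total count and the (failed test, error line) pairs it came from, e.g.:
#   {"OSError: Can't load config": {"count": 3,
#       "failed_tests": [("tests/models/x/test_x.py::TestX::test_y", "src/.../file.py:123"), ...]}}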
def get_model( test ) -> str:
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        test = test.split("/" )[2]
    else:
        test = None
    return test
def reduce_by_model( logs , error_filter=None ) -> int:
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table( reduced_by_error ) -> str:
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"""| {count} | {error[:100]} | |"""
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model( reduced_by_model ) -> Optional[int]:
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error , _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = f"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
a_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__( self , parent , vocab_size=1_00 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        self.parent = parent
        self.vocab_size = 1_00
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels , pixel_labels ):
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Tuple:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 1_96) , dtype=torch.bool ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1_96, 81_92) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2_81
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 2_18_41) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 23_96
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True , size=6_40 , do_center_crop=False )
        ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        image = Image.open(ds[0]["file"] )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1_50, 1_60, 1_60) )
        self.assertEqual(logits.shape , expected_shape )
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_a:
            expected_slice = torch.tensor(
                [
                    [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
                    [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
                    [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
                ] , device=torch_device , )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
                    [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
                    [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
                ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_post_processing_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True , size=6_40 , do_center_crop=False )
        ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        image = Image.open(ds[0]["file"] )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(5_00, 3_00)] )
        expected_shape = torch.Size((5_00, 3_00) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((1_60, 1_60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCAmelCase_ : Optional[Any] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def SCREAMING_SNAKE_CASE_ ( height : int , width : int , scale_factor : int = 8 ) -> Optional[Any]:
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
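# Worked example: with the default scale_factor of 8, height=512 gives 512 // 64 == 8
# blocks, i.e. 8 * 8 == 64 on the latent side; height=500 leaves a remainder, so it is
# rounded up to the same 8 * 8 == 64.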
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    def __init__( self , unet : UNetaDConditionModel , scheduler : DDPMScheduler , movq : VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
if latents is None:
a_ : Tuple = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
a_ : Tuple = latents.to(SCREAMING_SNAKE_CASE__ )
a_ : int = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
a_ : int = torch.device(F"""cuda:{gpu_id}""" )
a_ : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
a_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=SCREAMING_SNAKE_CASE__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a_ : Any = None
for cpu_offloaded_model in [self.unet, self.movq]:
a_ , a_ : Optional[int] = cpu_offload_with_hook(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_module_hook=SCREAMING_SNAKE_CASE__ )
# We'll offload the last model manually.
a_ : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE__ )
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 5_1_2 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 4.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , ) -> Optional[int]:
a_ : Union[str, Any] = self._execution_device
a_ : int = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ : List[Any] = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ : str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ : List[str] = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
a_ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
a_ : int = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
a_ : Optional[Any] = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
a_ : Optional[Any] = hint.repeat_interleave(SCREAMING_SNAKE_CASE__ , dim=0 )
a_ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE__ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.scheduler.timesteps
a_ : str = self.movq.config.latent_channels
a_ , a_ : Optional[Any] = downscale_height_and_width(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.movq_scale_factor )
# create initial latent
a_ : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
a_ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a_ : Dict = {'image_embeds': image_embeds, 'hint': hint}
a_ : List[Any] = self.unet(
sample=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , added_cond_kwargs=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
if do_classifier_free_guidance:
a_ , a_ : str = noise_pred.split(latents.shape[1] , dim=1 )
a_ , a_ : Optional[Any] = noise_pred.chunk(2 )
a_ , a_ : Optional[Any] = variance_pred.chunk(2 )
a_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a_ , a_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
a_ : Optional[int] = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , )[0]
# post-processing
a_ : Dict = self.movq.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
a_ : str = image * 0.5 + 0.5
a_ : str = image.clamp(0 , 1 )
a_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
a_ : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
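# Editor's note: a small self-contained sketch of the classifier-free guidance update
# used in the denoising loop above, noise = uncond + scale * (text - uncond). The
# tensor values are made up purely for illustration.
if __name__ == "__main__":
    import torch
    uncond = torch.tensor([0.0, 1.0])
    text = torch.tensor([1.0, 1.0])
    guidance_scale = 4.0
    guided = uncond + guidance_scale * (text - uncond)
    print(guided)  # tensor([4., 1.]) -- the text-conditioned direction is amplified 4x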
| 32 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
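# Editor's note: a quick illustration of get_pairs on a symbol tuple; my own example,
# not part of the original module.
if __name__ == "__main__":
    print(get_pairs(("h", "e", "l", "l", "o")))
    # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}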
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
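# Editor's note: an illustrative usage sketch (requires network access to fetch the
# vocab and BPE codes listed in PRETRAINED_VOCAB_FILES_MAP above); my own example.
if __name__ == "__main__":
    tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
    print(tokenizer.tokenize("Xin chào các bạn"))  # subwords carry "@@ " continuation markers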
| 281 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
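# Editor's note: a minimal standalone sketch of driving IPNDMScheduler outside the
# test harness, with a trivial stand-in "model" that predicts zeros; illustration only.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # a real pipeline would call a denoising model here
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])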
| 166 |
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab, merges, max_length=None, pad_token_id=None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length=None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 166 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map an original MAE checkpoint key to the HF ViTMAE naming scheme."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
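# Editor's note: a self-contained demonstration of the fused-QKV split performed in
# convert_state_dict above; the weight is random and only for illustration.
if __name__ == "__main__":
    hidden = 4
    qkv_weight = torch.randn(3 * hidden, hidden)  # rows are [query; key; value]
    q = qkv_weight[:hidden, :]
    k = qkv_weight[hidden : 2 * hidden, :]
    v = qkv_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)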
| 37 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
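# Editor's note: the sequence length used by the tester above, spelled out as plain
# arithmetic; with a 30x30 image and 2x2 patches there are 225 patches plus one [CLS].
if __name__ == "__main__":
    image_size, patch_size = 30, 2
    num_patches = (image_size // patch_size) ** 2
    print(num_patches + 1)  # 226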
| 143 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
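# Editor's note: a small sketch of the unsqueeze/expand trick used for the
# multiple-choice head above; values are arbitrary.
if __name__ == "__main__":
    input_ids = torch.tensor([[1, 2, 3]])  # (batch=1, seq=3)
    num_choices = 4
    expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
    print(expanded.shape)  # torch.Size([1, 4, 3]) -- one copy of the sequence per choice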
| 364 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _UpperCAmelCase ( a ):
'''simple docstring'''
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Dict:
_UpperCAmelCase : Dict = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Union[str, Any] = seq_length
_UpperCAmelCase : str = is_training
_UpperCAmelCase : Optional[Any] = use_input_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : str = max_position_embeddings
_UpperCAmelCase : Optional[int] = type_vocab_size
_UpperCAmelCase : List[str] = type_sequence_label_size
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : int = num_choices
_UpperCAmelCase : Dict = scope
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : int = None
if self.use_input_mask:
_UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Optional[Any] = None
if self.use_labels:
_UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Tuple:
_UpperCAmelCase : int = DistilBertModel(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(A , A )
_UpperCAmelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Any:
_UpperCAmelCase : int = DistilBertForMaskedLM(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Any = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : Tuple = DistilBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[int] = model(
A , attention_mask=A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> str:
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Union[str, Any] = DistilBertForSequenceClassification(A )
model.to(A )
model.eval()
_UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Optional[int] = DistilBertForTokenClassification(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[Any]:
_UpperCAmelCase : str = self.num_choices
_UpperCAmelCase : Dict = DistilBertForMultipleChoice(config=A )
model.to(A )
model.eval()
_UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : int = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( a ,a ,unittest.TestCase ):
'''simple docstring'''
a__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
a__ =(
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ =True
a__ =True
a__ =True
a__ =True
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = DistilBertModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=A , dim=3_7 )
def __lowerCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A )
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A )
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[int] = DistilBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Union[str, Any] = model_class(config=A )
_UpperCAmelCase : List[Any] = self._prepare_for_class(A , A )
_UpperCAmelCase : int = torch.jit.trace(
A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A , os.path.join(A , '''traced_model.pt''' ) )
_UpperCAmelCase : Optional[int] = torch.jit.load(os.path.join(A , '''traced_model.pt''' ) , map_location=A )
loaded(inputs_dict['''input_ids'''].to(A ) , inputs_dict['''attention_mask'''].to(A ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Dict = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_UpperCAmelCase : List[str] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCAmelCase : str = model(A , attention_mask=A )[0]
_UpperCAmelCase : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , A )
_UpperCAmelCase : Any = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
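# --- Illustrative sketch (not part of the tests above): the trace/save/load
# round-trip exercised by the torchscript test, reduced to a tiny module.
# Uses only the public torch.jit API; the module and shapes are arbitrary.
import os
import tempfile

import torch


class TinyNet(torch.nn.Module):
    def forward(self, x, mask):
        return (x * mask).sum(dim=-1)


net = TinyNet().eval()
example = (torch.randn(2, 4), torch.ones(2, 4))
traced = torch.jit.trace(net, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")
    # the reloaded graph must reproduce the eager outputs
    assert torch.allclose(net(*example), loaded(*example))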
| 68 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Optional[Any]:
if rng is None:
_a : int = random.Random()
_a : List[Any] = 1
for dim in shape:
total_dims *= dim
_a : Optional[int] = []
for _ in range(lowerCAmelCase_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
    _a : List[Any] = np.array(lowerCAmelCase_ , dtype=jnp.int32 ).reshape(lowerCAmelCase_ )
return output
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_a : Union[str, Any] = ids_tensor(lowerCAmelCase_ , vocab_size=2 , rng=lowerCAmelCase_ )
# make sure that at least one token is attended to for each batch
_a : Dict = 1
return attn_mask
@require_flax
class __magic_name__ :
lowerCAmelCase : List[Any] = None
lowerCAmelCase : Any = ()
def __lowercase ( self : List[Any] ):
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_a : Dict = 2
_a : List[str] = inputs['input_ids'].shape[-1] // 2
_a : int = inputs['input_ids'][:max_batch_size, :sequence_length]
_a : Any = jnp.ones_like(_UpperCAmelCase )
_a : Tuple = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_a : Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_a : List[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __lowercase ( self : Tuple ):
_a , _a , _a , _a : List[Any] = self._get_input_ids_and_config()
_a : str = False
_a : Dict = max_length
_a : Union[str, Any] = 0
for model_class in self.all_generative_model_classes:
_a : str = model_class(_UpperCAmelCase )
_a : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a : List[str] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[str] = pt_model_class(_UpperCAmelCase ).eval()
_a : Any = load_flax_weights_in_pytorch_model(_UpperCAmelCase ,flax_model.params )
_a : Optional[int] = flax_model.generate(_UpperCAmelCase ).sequences
_a : Optional[int] = pt_model.generate(torch.tensor(_UpperCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_a : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def __lowercase ( self : Any ):
_a , _a , _a , _a : Union[str, Any] = self._get_input_ids_and_config()
_a : Tuple = False
_a : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_a : Tuple = model_class(_UpperCAmelCase )
_a : str = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : int = jit(model.generate )
_a : str = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : List[str] ):
_a , _a , _a , _a : Union[str, Any] = self._get_input_ids_and_config()
_a : str = True
_a : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_a : Optional[Any] = model_class(_UpperCAmelCase )
_a : Union[str, Any] = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : List[str] = jit(model.generate )
_a : Union[str, Any] = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Tuple ):
_a , _a , _a , _a : Optional[int] = self._get_input_ids_and_config()
_a : Dict = False
_a : Optional[int] = max_length
_a : Optional[int] = 2
for model_class in self.all_generative_model_classes:
_a : Dict = model_class(_UpperCAmelCase )
_a : Tuple = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : Optional[int] = jit(model.generate )
_a : Tuple = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Optional[Any] ):
_a , _a , _a , _a : Dict = self._get_input_ids_and_config()
_a : Tuple = False
_a : Dict = max_length
_a : int = 2
_a : Dict = 2
for model_class in self.all_generative_model_classes:
_a : Any = model_class(_UpperCAmelCase )
_a : List[str] = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def __lowercase ( self : str ):
_a , _a , _a , _a : Optional[Any] = self._get_input_ids_and_config()
_a : List[str] = True
_a : Tuple = max_length
_a : int = 0.8
_a : List[Any] = 10
_a : List[Any] = 0.3
_a : Optional[int] = 1
_a : Union[str, Any] = 8
_a : int = 9
for model_class in self.all_generative_model_classes:
_a : Optional[Any] = model_class(_UpperCAmelCase )
_a : str = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : List[str] = jit(model.generate )
_a : Dict = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : int ):
_a , _a , _a , _a : Any = self._get_input_ids_and_config()
_a : Union[str, Any] = max_length
_a : Optional[Any] = 1
_a : List[Any] = 8
_a : Optional[int] = 9
for model_class in self.all_generative_model_classes:
_a : Tuple = model_class(_UpperCAmelCase )
_a : Any = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : List[str] = jit(model.generate )
_a : Any = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Optional[int] ):
_a , _a , _a , _a : Tuple = self._get_input_ids_and_config()
_a : Tuple = max_length
_a : Any = 2
_a : Tuple = 1
_a : Any = 8
_a : Optional[int] = 9
for model_class in self.all_generative_model_classes:
_a : str = model_class(_UpperCAmelCase )
_a : Tuple = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : Tuple = jit(model.generate )
_a : List[Any] = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Union[str, Any] ):
_a , _a , _a , _a : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_a : Tuple = attention_mask.at[(0, 0)].set(0 )
_a : List[Any] = False
_a : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : str = model.generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : List[Any] = jit(model.generate )
_a : List[Any] = jit_generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Optional[Any] ):
_a , _a , _a , _a : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_a : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_a : Dict = True
_a : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Dict = model.generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : Optional[int] = jit(model.generate )
_a : Tuple = jit_generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowercase ( self : Optional[int] ):
_a , _a , _a , _a : Any = self._get_input_ids_and_config()
# pad attention mask on the left
_a : Tuple = attention_mask.at[(0, 0)].set(0 )
_a : Dict = 2
_a : List[Any] = max_length
for model_class in self.all_generative_model_classes:
_a : str = model_class(_UpperCAmelCase )
_a : Optional[Any] = model.generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_UpperCAmelCase )
_a : Optional[Any] = jit(model.generate )
_a : List[str] = jit_generate(_UpperCAmelCase ,attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
_a : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
_a : Optional[int] = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
_a : Optional[int] = 'Hello world'
_a : Optional[Any] = tokenizer(_UpperCAmelCase ,return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_UpperCAmelCase ,'do_samples' ):
model.generate(_UpperCAmelCase ,do_samples=_UpperCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_UpperCAmelCase ,'foo' ):
_a : Optional[int] = {'foo': 'bar'}
model.generate(_UpperCAmelCase ,**_UpperCAmelCase )
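# --- Illustrative sketch of the jit-equivalence pattern asserted throughout the
# tests above: wrapping a pure function in jax.jit must not change its outputs.
# Self-contained; `step` is a stand-in for model.generate, not a real model.
import jax
import jax.numpy as jnp


def step(ids):
    # any pure function of the inputs will do for the comparison
    return jnp.cumsum(ids, axis=-1)


ids = jnp.arange(8).reshape(2, 4)
eager_out = step(ids)
jit_out = jax.jit(step)(ids)
assert eager_out.tolist() == jit_out.tolist()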
| 89 |
"""simple docstring"""
def _A (__a = 50 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
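# --- Quick sanity sketch (illustrative only): cross-check the table above with a
# direct recursion. For a single tile length t, either the first cell stays black
# or a tile starts there; subtracting 1 removes the all-black arrangement.
# `_A` is the function defined above.
def _brute_ways(n: int, t: int) -> int:
    if n < t:
        return 1  # only black cells fit
    return _brute_ways(n - 1, t) + _brute_ways(n - t, t)


for _n in range(1, 12):
    assert _A(_n) == sum(_brute_ways(_n, _t) - 1 for _t in (2, 3, 4))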
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase__ : Optional[int] =logging.get_logger(__name__)
UpperCAmelCase__ : Any ={
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __A ( a ):
__A = """bart"""
__A = ["""past_key_values"""]
__A = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , UpperCAmelCase_=50265 , UpperCAmelCase_=1024 , UpperCAmelCase_=12 , UpperCAmelCase_=4096 , UpperCAmelCase_=16 , UpperCAmelCase_=12 , UpperCAmelCase_=4096 , UpperCAmelCase_=16 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_="gelu" , UpperCAmelCase_=1024 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0_2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=3 , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , UpperCAmelCase_=True , UpperCAmelCase_=2 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ):
lowerCamelCase =vocab_size
lowerCamelCase =max_position_embeddings
lowerCamelCase =d_model
lowerCamelCase =encoder_ffn_dim
lowerCamelCase =encoder_layers
lowerCamelCase =encoder_attention_heads
lowerCamelCase =decoder_ffn_dim
lowerCamelCase =decoder_layers
lowerCamelCase =decoder_attention_heads
lowerCamelCase =dropout
lowerCamelCase =attention_dropout
lowerCamelCase =activation_dropout
lowerCamelCase =activation_function
lowerCamelCase =init_std
lowerCamelCase =encoder_layerdrop
lowerCamelCase =decoder_layerdrop
lowerCamelCase =classifier_dropout
lowerCamelCase =use_cache
lowerCamelCase =encoder_layers
lowerCamelCase =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase_ ):
lowerCamelCase =self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
class __A ( a ):
@property
def _snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase ={0: """batch"""}
lowerCamelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCamelCase ={0: """batch""", 1: """decoder_sequence"""}
lowerCamelCase ={0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCamelCase , lowerCamelCase =self.num_layers
for i in range(UpperCAmelCase_ ):
lowerCamelCase ={0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase ={0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCamelCase =OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _snake_case ( self ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase =super().outputs
else:
lowerCamelCase =super(UpperCAmelCase_ , self ).outputs
if self.use_past:
lowerCamelCase , lowerCamelCase =self.num_layers
for i in range(UpperCAmelCase_ ):
lowerCamelCase ={0: """batch""", 2: """past_sequence + sequence"""}
lowerCamelCase ={0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
lowerCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Generate decoder inputs
lowerCamelCase =seq_length if not self.use_past else 1
lowerCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase =dict(**UpperCAmelCase_ , **UpperCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase =common_inputs["""input_ids"""].shape
lowerCamelCase =common_inputs["""decoder_input_ids"""].shape[1]
lowerCamelCase , lowerCamelCase =self.num_attention_heads
lowerCamelCase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase =decoder_seq_length + 3
lowerCamelCase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase =torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ )] , dim=1 )
lowerCamelCase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase , lowerCamelCase =self.num_layers
lowerCamelCase =min(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =max(UpperCAmelCase_ , UpperCAmelCase_ ) - min_num_layers
lowerCamelCase ="""encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase_ ),
torch.zeros(UpperCAmelCase_ ),
torch.zeros(UpperCAmelCase_ ),
torch.zeros(UpperCAmelCase_ ),
) )
# TODO: test this.
lowerCamelCase =encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCAmelCase_ , UpperCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) )
return common_inputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
lowerCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase =seqlen + 2
lowerCamelCase , lowerCamelCase =self.num_layers
lowerCamelCase , lowerCamelCase =self.num_attention_heads
lowerCamelCase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase =common_inputs["""attention_mask"""].dtype
lowerCamelCase =torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 )
lowerCamelCase =[
(torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(UpperCAmelCase_ )
]
return common_inputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase =compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase =tokenizer.num_special_tokens_to_add(UpperCAmelCase_ )
lowerCamelCase =compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase =[""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase =dict(tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) )
return common_inputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
elif self.task == "causal-lm":
lowerCamelCase =self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
else:
lowerCamelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
return common_inputs
def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase =super()._flatten_past_key_values_(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
lowerCamelCase =super(UpperCAmelCase_ , self )._flatten_past_key_values_(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
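# --- Minimal, self-contained sketch of the dummy-input logic above. The helper
# `effective_dim` is a hypothetical stand-in for what `compute_effective_axis_dimension`
# is used for here: dynamic (-1) axes fall back to a small fixed size, leaving
# room for special tokens. The fixed sizes below are illustrative.
from collections import OrderedDict

import numpy as np


def effective_dim(dim: int, fixed: int, tokens_to_add: int = 0) -> int:
    return fixed - tokens_to_add if dim <= 0 else dim


batch = effective_dim(-1, fixed=2)                 # cf. OnnxConfig.default_fixed_batch
seq = effective_dim(-1, fixed=8, tokens_to_add=2)  # leave room for special tokens

dummy_inputs = OrderedDict(
    input_ids=np.zeros((batch, seq), dtype=np.int64),
    attention_mask=np.ones((batch, seq), dtype=np.int64),
)
assert dummy_inputs["input_ids"].shape == (2, 6)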
| 262 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase__ : Optional[int] =logging.getLogger(__name__)
UpperCAmelCase__ : Tuple =50 # max width of layer names
UpperCAmelCase__ : List[str] =70 # max width of quantizer names
def _lowercase ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase =parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=_UpperCAmelCase , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=_UpperCAmelCase , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=_UpperCAmelCase , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=_UpperCAmelCase , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=_UpperCAmelCase , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=_UpperCAmelCase , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def _lowercase ( _UpperCAmelCase ) -> Dict:
if args.calibrator == "max":
lowerCamelCase ="""max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
lowerCamelCase ="""histogram"""
elif args.calibrator == "mse":
lowerCamelCase ="""histogram"""
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
lowerCamelCase =QuantDescriptor(num_bits=args.aprec , calib_method=_UpperCAmelCase )
lowerCamelCase =QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_UpperCAmelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_UpperCAmelCase )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ) -> int:
logger.info("""Configuring Model for Quantization""" )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_UpperCAmelCase , ["""embeddings"""] , which="""weight""" , _disabled=_UpperCAmelCase )
if args.quant_disable:
set_quantizer_by_name(_UpperCAmelCase , [""""""] , _disabled=_UpperCAmelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_UpperCAmelCase , args.quant_disable_keyword , _disabled=_UpperCAmelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_UpperCAmelCase , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=_UpperCAmelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_UpperCAmelCase , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=_UpperCAmelCase )
if args.recalibrate_weights:
recalibrate_weights(_UpperCAmelCase )
if args.fuse_qkv:
fuse_qkv(_UpperCAmelCase , _UpperCAmelCase )
if args.clip_gelu:
clip_gelu(_UpperCAmelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_UpperCAmelCase )
def _lowercase ( _UpperCAmelCase ) -> Optional[Any]:
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_UpperCAmelCase )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
def fusea(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for mod in [qq, qk, qv]:
if not hasattr(_UpperCAmelCase , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
lowerCamelCase =qq._amax.detach().item()
lowerCamelCase =qk._amax.detach().item()
lowerCamelCase =qv._amax.detach().item()
lowerCamelCase =max(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
qq._amax.fill_(_UpperCAmelCase )
qk._amax.fill_(_UpperCAmelCase )
qv._amax.fill_(_UpperCAmelCase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
lowerCamelCase =mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_UpperCAmelCase )
lowerCamelCase =mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def _lowercase ( _UpperCAmelCase ) -> Dict:
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
lowerCamelCase =mod.weight.shape[0]
lowerCamelCase =mod._weight_quantizer._amax.detach()
lowerCamelCase =torch.ones(_UpperCAmelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def _lowercase ( _UpperCAmelCase ) -> List[str]:
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCamelCase =set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCamelCase =set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCamelCase =pytorch_quantization.utils.reduce_amax(mod.weight , axis=_UpperCAmelCase , keepdims=_UpperCAmelCase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
lowerCamelCase =amax
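# --- Illustrative numpy sketch (not part of this module) of the axis reduction
# described in the comment above: a 4D weight quantized per axis 0 reduces its
# absolute maximum over the remaining axes (1, 2, 3), one scale per out channel.
import numpy as np

_w = np.random.randn(8, 4, 3, 3)                   # (out_ch, in_ch, kH, kW)
_axes = tuple(set(range(_w.ndim)) - {0})           # -> (1, 2, 3)
_amax = np.abs(_w).max(axis=_axes, keepdims=True)  # shape (8, 1, 1, 1)
assert _amax.shape == (8, 1, 1, 1)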
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase=25 , _UpperCAmelCase=1_80 , _UpperCAmelCase=None ) -> Dict:
if ignore is None:
lowerCamelCase =[]
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =[ignore]
lowerCamelCase =0
for name, mod in model.named_modules():
if not hasattr(_UpperCAmelCase , """weight""" ):
continue
lowerCamelCase =max(_UpperCAmelCase , len(_UpperCAmelCase ) )
for name, mod in model.named_modules():
lowerCamelCase =getattr(_UpperCAmelCase , """_input_quantizer""" , _UpperCAmelCase )
lowerCamelCase =getattr(_UpperCAmelCase , """_weight_quantizer""" , _UpperCAmelCase )
if not hasattr(_UpperCAmelCase , """weight""" ):
continue
if type(_UpperCAmelCase ) in ignore:
continue
if [True for s in ignore if type(_UpperCAmelCase ) is str and s in name]:
continue
lowerCamelCase =F"""Act:{input_q.extra_repr()}"""
lowerCamelCase =F"""Wgt:{weight_q.extra_repr()}"""
lowerCamelCase =F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_UpperCAmelCase ) <= line_width:
logger.info(_UpperCAmelCase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def _lowercase ( _UpperCAmelCase ) -> Dict:
lowerCamelCase =0
for name, mod in model.named_modules():
if isinstance(_UpperCAmelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase =getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if quantizer_mod is not None:
assert hasattr(_UpperCAmelCase , _UpperCAmelCase )
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="both" , **_UpperCAmelCase ) -> List[str]:
lowerCamelCase =F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , """_input_quantizer""" , _UpperCAmelCase , _UpperCAmelCase )
if which in ["weight", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , """_weight_quantizer""" , _UpperCAmelCase , _UpperCAmelCase )
logger.info(_UpperCAmelCase )
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> int:
for name, mod in model.named_modules():
if hasattr(_UpperCAmelCase , """_input_quantizer""" ) or hasattr(_UpperCAmelCase , """_weight_quantizer""" ):
for n in names:
if re.search(_UpperCAmelCase , _UpperCAmelCase ):
set_quantizers(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info(_UpperCAmelCase )
| 262 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> List[str]:
try:
snake_case__ : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
snake_case__ : Dict = default
else:
# KEY is set, convert it to True or False.
try:
snake_case__ : List[str] = strtobool(_lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
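# --- Illustrative note (plain Python): distutils' strtobool accepts
# y/yes/t/true/on/1 as truthy and n/no/f/false/off/0 as falsy (returning the
# ints 1 and 0), and raises ValueError on anything else.
from distutils.util import strtobool as _strtobool

assert _strtobool("yes") == 1 and _strtobool("0") == 0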
__a = parse_flag_from_env("RUN_SLOW", default=False)
def __snake_case( _lowerCAmelCase ) -> List[Any]:
return unittest.skip("""Test was skipped""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Any:
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> int:
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Any:
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Dict:
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Union[str, Any]:
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Any:
if test_case is None:
return partial(_lowerCAmelCase , version=_lowerCAmelCase )
return unittest.skipUnless(is_torch_version(""">=""" , _lowerCAmelCase ) , f"test requires torch version >= {version}" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> int:
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Any:
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(_lowerCAmelCase )
__a = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __snake_case( _lowerCAmelCase ) -> int:
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(_lowerCAmelCase )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
lowercase = True
@classmethod
def lowerCamelCase ( cls : Any ):
snake_case__ : Union[str, Any] = tempfile.mkdtemp()
@classmethod
def lowerCamelCase ( cls : int ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCamelCase ( self : Tuple ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(snake_case_ )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Any ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Optional[int] , snake_case_ : Union[mock.Mock, List[mock.Mock]] ):
snake_case__ : Dict = mocks if isinstance(snake_case_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Union[str, Any] = AcceleratorState()
snake_case__ : int = tensor[None].clone().to(state.device )
snake_case__ : Optional[Any] = gather(_lowerCAmelCase ).cpu()
snake_case__ : str = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _lowerCAmelCase ):
return False
return True
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
snake_case__ : List[Any] = returncode
snake_case__ : List[Any] = stdout
snake_case__ : List[Any] = stderr
async def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
while True:
snake_case__ : Optional[int] = await stream.readline()
if line:
callback(_lowerCAmelCase )
else:
break
async def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(_lowerCAmelCase ) )
snake_case__ : Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
snake_case__ : List[Any] = []
snake_case__ : Any = []
def tee(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="" ):
snake_case__ : str = line.decode("""utf-8""" ).rstrip()
sink.append(_lowerCAmelCase )
if not quiet:
print(_lowerCAmelCase , _lowerCAmelCase , file=_lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=_lowerCAmelCase , )
return _RunOutput(await p.wait() , _lowerCAmelCase , _lowerCAmelCase )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=180 , _lowerCAmelCase=False , _lowerCAmelCase=True ) -> _RunOutput:
snake_case__ : Optional[Any] = asyncio.get_event_loop()
snake_case__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCAmelCase , env=_lowerCAmelCase , stdin=_lowerCAmelCase , timeout=_lowerCAmelCase , quiet=_lowerCAmelCase , echo=_lowerCAmelCase ) )
snake_case__ : List[Any] = """ """.join(_lowerCAmelCase )
if result.returncode > 0:
snake_case__ : List[str] = """\n""".join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
pass
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=False ) -> List[Any]:
try:
snake_case__ : List[Any] = subprocess.check_output(_lowerCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_lowerCAmelCase , """decode""" ):
snake_case__ : str = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"Command `{' '.join(_lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 35 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = GPTSanJapaneseTokenizer
lowercase = False
lowercase = {"do_clean_text": False, "add_prefix_space": False}
def lowerCamelCase ( self : str ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : int = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
snake_case__ : List[Any] = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(snake_case_ ) )
def lowerCamelCase ( self : Any , **snake_case_ : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCamelCase ( self : Any , snake_case_ : str ):
snake_case__ : Union[str, Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def lowerCamelCase ( self : Any , snake_case_ : Dict ):
snake_case__ , snake_case__ : int = self.get_input_output_texts(snake_case_ )
snake_case__ : int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
snake_case__ : List[str] = tokenizer.decode(snake_case_ , clean_up_tokenization_spaces=snake_case_ )
return text, ids
def lowerCamelCase ( self : Optional[Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Union[str, Any] ):
pass # TODO add if relevant
def lowerCamelCase ( self : List[str] ):
pass # TODO add if relevant
def lowerCamelCase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : int = """こんにちは、世界。 こんばんは、㔺界。"""
snake_case__ : Optional[int] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
snake_case__ : Dict = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
snake_case__ : Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
snake_case__ : Union[str, Any] = tokens + [tokenizer.unk_token]
snake_case__ : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
snake_case__ : Any = tokenizer.convert_tokens_to_ids(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
snake_case__ : Union[str, Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
snake_case__ : Optional[int] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
snake_case__ : Any = tokenizer.encode(snake_case_ )
snake_case__ : int = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Tuple = """こんにちは、世界。"""
snake_case__ : Optional[Any] = """こんばんは、㔺界。😀"""
snake_case__ : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
snake_case__ : Dict = tokenizer.encode(prefix_text + input_text )
snake_case__ : Dict = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
snake_case__ : int = tokenizer.encode(snake_case_ , prefix_text=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(snake_case_ )
snake_case__ : Union[str, Any] = tokenizer.decode(snake_case_ )
snake_case__ : str = tokenizer.decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
snake_case__ : Dict = """こんにちは、世界。"""
snake_case__ : Optional[int] = """こんばんは、㔺界。😀"""
snake_case__ : Any = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : Optional[int] = len(tokenizer.encode(snake_case_ ) ) - 2
snake_case__ : List[str] = [1] + [0] * (len_prefix + len_text + 1)
snake_case__ : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
snake_case__ : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
snake_case__ : Any = tokenizer(prefix_text + input_text ).token_type_ids
snake_case__ : str = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
snake_case__ : Optional[Any] = tokenizer(snake_case_ , prefix_text=snake_case_ ).token_type_ids
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : Union[str, Any] = tokenizer.encode("""あンいワ""" )
snake_case__ : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
snake_case__ : Dict = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertEqual(tokenizer.decode(snake_case_ ) , tokenizer.decode(snake_case_ ) )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertNotEqual(snake_case_ , snake_case_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
snake_case__ : int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
snake_case__ : Optional[Any] = tokenizer(snake_case_ , padding=snake_case_ )
snake_case__ : Tuple = tokenizer.batch_encode_plus(snake_case_ , padding=snake_case_ )
# fmt: off
snake_case__ : Optional[Any] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
snake_case__ : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
snake_case__ : Optional[Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , snake_case_ )
self.assertListEqual(x_token.token_type_ids , snake_case_ )
self.assertListEqual(x_token.attention_mask , snake_case_ )
self.assertListEqual(x_token_a.input_ids , snake_case_ )
self.assertListEqual(x_token_a.token_type_ids , snake_case_ )
self.assertListEqual(x_token_a.attention_mask , snake_case_ )
def lowerCamelCase ( self : Any ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase ( self : List[str] ):
# tokenizer has no padding token
pass
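# --- Illustrative sketch (not part of the tokenizer) of the token-type layout
# asserted above for the prefix/input split: the leading slot (presumably a
# separator/start token) and the prefix segment are marked 1, while the
# generated segment plus its trailing slot are marked 0.
def _token_type_layout(len_prefix: int, len_text: int) -> list:
    return [1] + [1] * len_prefix + [0] * (len_text + 1)


assert _token_type_layout(2, 3) == [1, 1, 1, 0, 0, 0, 0]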
| 35 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE( lowercase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "mgp-str"
def __init__( self ,SCREAMING_SNAKE_CASE__=[32, 1_28] ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=27 ,SCREAMING_SNAKE_CASE__=38 ,SCREAMING_SNAKE_CASE__=5_02_57 ,SCREAMING_SNAKE_CASE__=3_05_22 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=4.0 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=1E-5 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.0_2 ,**SCREAMING_SNAKE_CASE__ ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**a__ )
__SCREAMING_SNAKE_CASE :str = image_size
__SCREAMING_SNAKE_CASE :Dict = patch_size
__SCREAMING_SNAKE_CASE :Dict = num_channels
__SCREAMING_SNAKE_CASE :int = max_token_length
__SCREAMING_SNAKE_CASE :Any = num_character_labels
__SCREAMING_SNAKE_CASE :Optional[Any] = num_bpe_labels
__SCREAMING_SNAKE_CASE :Any = num_wordpiece_labels
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE :Optional[int] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = mlp_ratio
__SCREAMING_SNAKE_CASE :List[str] = distilled
__SCREAMING_SNAKE_CASE :Union[str, Any] = layer_norm_eps
__SCREAMING_SNAKE_CASE :Dict = drop_rate
__SCREAMING_SNAKE_CASE :str = qkv_bias
__SCREAMING_SNAKE_CASE :int = attn_drop_rate
__SCREAMING_SNAKE_CASE :Union[str, Any] = drop_path_rate
__SCREAMING_SNAKE_CASE :List[str] = output_aa_attentions
__SCREAMING_SNAKE_CASE :str = initializer_range
| 354 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"vocab_file": "spiece.model"}
lowerCamelCase_ = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
lowerCamelCase_ = {"bert_for_seq_generation": 5_1_2}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__="<s>" ,SCREAMING_SNAKE_CASE__="</s>" ,SCREAMING_SNAKE_CASE__="<unk>" ,SCREAMING_SNAKE_CASE__="<pad>" ,SCREAMING_SNAKE_CASE__="<::::>" ,SCREAMING_SNAKE_CASE__ = None ,**SCREAMING_SNAKE_CASE__ ,) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__SCREAMING_SNAKE_CASE :Dict = vocab_file
__SCREAMING_SNAKE_CASE :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self.__dict__.copy()
__SCREAMING_SNAKE_CASE :Dict = None
return state
def __setstate__( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__SCREAMING_SNAKE_CASE :str = {}
__SCREAMING_SNAKE_CASE :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = []
__SCREAMING_SNAKE_CASE :List[str] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__SCREAMING_SNAKE_CASE :Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
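# --- Illustrative sketch (plain Python, not the real decoder) of the rule in
# the comment above: pieces between special tokens are decoded together, while
# special tokens themselves bypass the sentencepiece model. The toy `decode`
# just maps the "▁" word-boundary marker back to a space.
def _join_pieces(tokens, specials):
    decode = lambda ts: "".join(t.replace("▁", " ") for t in ts)
    out, buf = "", []
    for t in tokens:
        if t in specials:
            out += decode(buf) + t   # flush accumulated pieces, pass token through
            buf = []
        else:
            buf.append(t)
    return (out + decode(buf)).strip()


assert _join_pieces(["▁Hello", "</s>", "▁world"], {"</s>"}) == "Hello</s> world"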
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__SCREAMING_SNAKE_CASE :Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ ,'''wb''' ) as fi:
__SCREAMING_SNAKE_CASE :Tuple = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,) | 239 | 0 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Build a map-style or streaming dataset from a Python generator function."""

    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs) -> None:
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
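
# Illustrative usage, a sketch of what `Dataset.from_generator` does under the
# hood (the tiny generator below is a made-up example, not part of this file):
#
#   def gen():
#       for i in range(3):
#           yield {"value": i}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()
#   # ds["value"] -> [0, 1, 2]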
| 48 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an exact integer exponent
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
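
# Quick sanity checks (illustrative only; the imported `bin_exp_mod(a, d, n)`
# helper is assumed to compute a**d % n, as its name suggests):
#   is_prime_big(97)  -> True   (small prime)
#   is_prime_big(561) -> False  (561 = 3 * 11 * 17 is a Carmichael number: it
#                                fools the plain Fermat test, but some random
#                                base exposes it in the Miller-Rabin rounds)
#   is_prime_big(2)   -> True   (handled by the even-number shortcut above)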
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 48 | 1 |
from manim import *
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Optional[int] = Rectangle(height=0.25 , width=0.25 )
__UpperCamelCase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Optional[int] = [mem.copy() for i in range(6 )]
__UpperCamelCase : List[Any] = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Optional[int] = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : List[str] = VGroup(a , a ).arrange(a , buff=0 )
__UpperCamelCase : Optional[Any] = Text("CPU" , font_size=2_4 )
__UpperCamelCase : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
__UpperCamelCase : Tuple = [mem.copy() for i in range(4 )]
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Tuple = Text("GPU" , font_size=2_4 )
__UpperCamelCase : Optional[int] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Any = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Dict = Text("Model" , font_size=2_4 )
__UpperCamelCase : List[str] = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
__UpperCamelCase : List[str] = []
__UpperCamelCase : Union[str, Any] = []
__UpperCamelCase : Optional[Any] = []
for i, rect in enumerate(a ):
rect.set_stroke(a )
__UpperCamelCase : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a , buff=0.0 )
self.add(a )
model_cpu_arr.append(a )
self.add(*a , *a , *a )
__UpperCamelCase : Any = [mem.copy() for i in range(6 )]
__UpperCamelCase : Tuple = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Tuple = Text("Loaded Checkpoint" , font_size=2_4 )
__UpperCamelCase : Tuple = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
checkpoint.move_to([3, 0.5, 0] )
self.add(a )
__UpperCamelCase : List[str] = []
__UpperCamelCase : str = []
for i, rect in enumerate(a ):
__UpperCamelCase : Dict = fill.copy().set_fill(a , opacity=0.7 )
target.move_to(a )
ckpt_arr.append(a )
__UpperCamelCase : Any = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a )
self.add(*a , *a )
__UpperCamelCase : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase : Any = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
__UpperCamelCase : str = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=1_8 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
__UpperCamelCase : Optional[Any] = MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
__UpperCamelCase : Tuple = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase : Tuple = [meta_mem.copy() for i in range(6 )]
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : Dict = VGroup(*a ).arrange(a , buff=0 )
__UpperCamelCase : List[str] = VGroup(a , a ).arrange(a , buff=0 )
__UpperCamelCase : int = Text("Disk" , font_size=2_4 )
__UpperCamelCase : str = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a , run_time=3 ) , Write(a , run_time=1 ) , Create(a , run_time=1 ) )
__UpperCamelCase : str = []
for i, rect in enumerate(a ):
__UpperCamelCase : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a , run_time=1.5 ) )
self.play(*a )
self.play(FadeOut(a ) )
__UpperCamelCase : Tuple = MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
self.play(
FadeOut(a , a , *a , *a ) , )
self.wait() | 364 |
g = 9.80665  # standard gravity, m/s^2

def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force (N) on a body displacing `volume` m^3 of fluid."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
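
# Worked example: a fully submerged 0.5 m^3 body in fresh water
# (density ~1000 kg/m^3) displaces
#     1000 * 9.80665 * 0.5 = 4903.325 N
# i.e. archimedes_principle(fluid_density=1000, volume=0.5) -> 4903.325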
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 151 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Solve for whichever of stress, tangential force, or area is passed as 0."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
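
# Worked example: passing stress=0 asks the function to solve for stress from
# the other two values, e.g.
#     shear_stress(stress=0, tangential_force=25, area=0.05)
#     -> ("stress", 500.0)   # 25 N / 0.05 m^2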
if __name__ == "__main__":
import doctest
doctest.testmod() | 152 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : List[Any] , _a : List[str]=7 , _a : List[str]=3 , _a : Tuple=18 , _a : Tuple=30 , _a : str=400 , _a : Tuple=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[int]=None , ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size if size is not None else {'height': 20, 'width': 20}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =do_convert_rgb
_SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096]
_SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'height': 16, 'width': 16}
def A ( self : Any ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : Dict ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self )
@property
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Any ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image()
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
_SCREAMING_SNAKE_CASE =2048
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='pt' , max_patches=_a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def A ( self : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_SCREAMING_SNAKE_CASE =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
_SCREAMING_SNAKE_CASE ='Hello'
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : str ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 )
_SCREAMING_SNAKE_CASE =3
@property
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 47 | 0 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
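
# Worked example: solution(13195) -> 29, since 13195 = 5 * 7 * 13 * 29 and the
# loop divides out each prime factor in turn, leaving the largest one in `ans`.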
if __name__ == "__main__":
print(F'''{solution() = }''')
| 361 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))

@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)

@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)

def benchmark_map_filter():
    # Benchmark keys below are descriptive labels for the results file.
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string'''), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['''text'''])

        times['''map identity'''] = map(dataset)
        times['''map identity batched'''] = map(dataset, batched=True)
        times['''map no-op batched'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''numpy'''):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''pandas'''):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''torch''', columns='''numbers'''):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers'''):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x: None, batched=True)
        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True)
        times['''filter'''] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 173 | 0 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id: str, path: str, revision: str) -> None:
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}'''
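
# For example (URL shape inferred from the assertion above):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"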
| 288 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__snake_case = logging.get_logger(__name__)
__snake_case = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']')
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
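
# Illustrative usage (a sketch; the two tiny datasets are made up):
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]  -> [0, 10, 1, 11, 2, 12]
# With `probabilities` (and a `seed`), rows are instead sampled at random from
# each source until the `stopping_strategy` is met.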
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets row-wise (axis=0) or column-wise (axis=1)."""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']')
            raise ValueError(
                F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 176 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in [a, b] by repeated interval halving."""
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid

def f(x: float) -> float:
    return x**3 - 2 * x - 5
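
# Worked example: f(x) = x**3 - 2*x - 5 has a single real root near
# x ≈ 2.0945515. bisection(f, 1, 1000) keeps halving [start, end] until
# |start - mid| <= 1e-7, so the value printed below matches that root to
# about seven decimal places.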
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 152 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 152 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        raise NotImplementedError()

    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()
| 165 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : List[Any] = "▁"
A_ : str = {"vocab_file": "sentencepiece.bpe.model"}
A_ : Union[str, Any] = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
A_ : List[str] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple = ['input_ids', 'attention_mask']
def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : Union[str, Any]="<unk>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Optional[int] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
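        # Worked example of the alignment above: "," has id 3 in the spm vocab,
        # so its fairseq id is 3 + offset = 4; the four tokens in
        # `fairseq_tokens_to_ids` bypass sentencepiece entirely, and "<mask>"
        # is appended at the very end (len(sp_model) + offset).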
def __getstate__( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , __UpperCAmelCase : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : str ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : int ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = """""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 165 | 1 |
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes strictly below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes

def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers not exceeding `limit` (Project Euler #234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
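
# How the loop above maps to Project Euler #234: for consecutive primes
# p < q, every n in (p**2, q**2) has lps(n) = p and ups(n) = q, so the three
# inner loops add the multiples of p, add the multiples of q, and subtract
# twice the multiples of p*q (which were counted by both), leaving exactly
# the semidivisible numbers accumulated in `matches_sum`.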
if __name__ == "__main__":
print(solution())
| 350 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: Union[str, Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = generator.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self ):
A__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''',torch_dtype=torch.floataa )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''A painting of a squirrel eating a burger '''
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=__lowerCamelCase,generator=__lowerCamelCase,guidance_scale=7.5,num_inference_steps=50,output_type='''numpy''' ).images
A__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 39 | 0 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2

def coulombs_law(force: float, charge_1: float, charge_2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is passed as 0."""
    charge_product = abs(charge_1 * charge_2)
    if (force, charge_1, charge_2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge_1 == 0:
        charge_1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge_2)
        return {"charge1": charge_1}
    elif charge_2 == 0:
        charge_2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge_1)
        return {"charge2": charge_2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')
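
# Worked example: two 1 C charges 1 m apart experience
#     F = k * |q1 * q2| / d**2 = 8.988e9 N
# i.e. coulombs_law(force=0, charge_1=1, charge_2=1, distance=1)
#      -> {"force": 8988000000.0}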
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : int = PegasusTokenizer
__lowercase : Any = PegasusTokenizerFast
__lowercase : Optional[int] = True
__lowercase : Tuple = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[str] = PegasusTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = '</s>'
lowercase__: Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: Optional[Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowercase__: Dict = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
lowercase__: Tuple = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__: Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowercase__: Union[str, Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: int = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
lowercase__: int = 'To ensure a smooth flow of bank resolutions.'
lowercase__: Any = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: str = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = ['This is going to be way too long.' * 150, 'short example']
lowercase__: Tuple = ['not super long but more than 5 tokens', 'tiny']
lowercase__: Dict = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Any = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
# fmt: off
lowercase__: List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : int = PegasusTokenizer
__lowercase : Any = PegasusTokenizerFast
__lowercase : Any = True
__lowercase : Dict = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: Union[str, Any] = PegasusTokenizer(lowerCAmelCase__ , offset=0 , mask_token_sent=lowerCAmelCase__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: str = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__: Tuple = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowercase__: List[Any] = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
lowercase__: Any = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: List[Any] = ['This is going to be way too long.' * 1_000, 'short example']
lowercase__: str = ['not super long but more than 5 tokens', 'tiny']
lowercase__: Tuple = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Dict = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: str = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowercase__: Optional[int] = self._large_tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(
lowerCAmelCase__ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 196 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Dict = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 140 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A__ ( __snake_case ):
def __init__( self , A_ , A_=None , A_=None , A_=0 ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = 1.0 if scale is None else scale
UpperCamelCase : Optional[int] = 0.0 if loc is None else loc
super().__init__(A_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=A_ )] )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.variance.sqrt()
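
    # The properties above follow directly from Y = scale * X + loc:
    # E[Y] = scale * E[X] + loc and Var[Y] = scale**2 * Var[X], so the
    # standard deviation rescales by |scale|.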
class A__ ( nn.Module ):
def __init__( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase : Union[str, Any] = args_dim
UpperCamelCase : str = nn.ModuleList([nn.Linear(A_ , A_ ) for dim in args_dim.values()] )
UpperCamelCase : Union[str, Any] = domain_map
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = [proj(A_ ) for proj in self.proj]
return self.domain_map(*A_ )
class A__ ( nn.Module ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
UpperCamelCase : str = function
def __UpperCamelCase( self , A_ , *A_ ):
'''simple docstring'''
return self.function(A_ , *A_ )
class A__ :
_UpperCAmelCase :type
_UpperCAmelCase :int
_UpperCAmelCase :Dict[str, int]
def __init__( self , A_ = 1 ):
'''simple docstring'''
UpperCamelCase : Tuple = dim
UpperCamelCase : Union[str, Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*A_ )
else:
return Independent(self.distribution_class(*A_ ) , 1 )
def __UpperCamelCase( self , A_ , A_ = None , A_ = None , ):
'''simple docstring'''
UpperCamelCase : str = self._base_distribution(A_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(A_ , loc=A_ , scale=A_ , event_dim=self.event_dim )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return len(self.event_shape )
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 0.0
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return ParameterProjection(
in_features=A_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __UpperCamelCase( self , *A_ ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
    def squareplus(x):
        """Smoothly map any real tensor onto the strictly positive reals."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
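
    # squareplus behaves like softplus but is cheaper to evaluate:
    # (x + sqrt(x**2 + 4)) / 2. For example squareplus(0) = 1.0, and large
    # negative inputs give tiny but still positive outputs, which keeps the
    # scale/df parameters of the distributions below valid.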
class A__ ( __snake_case ):
_UpperCAmelCase :Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
_UpperCAmelCase :type = StudentT
@classmethod
def __UpperCamelCase( cls , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCamelCase : int = 2.0 + cls.squareplus(A_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( __snake_case ):
_UpperCAmelCase :Dict[str, int] = {"loc": 1, "scale": 1}
_UpperCAmelCase :type = Normal
@classmethod
def __UpperCamelCase( cls , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = cls.squareplus(A_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( __snake_case ):
_UpperCAmelCase :Dict[str, int] = {"total_count": 1, "logits": 1}
_UpperCAmelCase :type = NegativeBinomial
@classmethod
def __UpperCamelCase( cls , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = cls.squareplus(A_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=A_ , logits=A_ )
else:
return Independent(self.distribution_class(total_count=A_ , logits=A_ ) , 1 )
def __UpperCamelCase( self , A_ , A_ = None , A_ = None ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Any = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 140 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate torch's CUDA OOM error so the retry logic can be tested on CPU."""
    raise RuntimeError("""CUDA out of memory.""")
class ModelForTest(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x) -> torch.Tensor:
        return self.linear2(self.batchnorm(self.linear1(x)))
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowercase ):
nonlocal batch_sizes
batch_sizes.append(lowercase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase , [1_2_8, 6_4, 3_2, 1_6, 8] )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowercase , lowercase ):
nonlocal batch_sizes
batch_sizes.append(lowercase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCamelCase , __UpperCamelCase = mock_training_loop_function("""hello""" )
self.assertListEqual(lowercase , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, """hello"""] )
def __lowerCamelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase ):
pass
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __lowerCamelCase ( self ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowercase ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] )
def __lowerCamelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(lowercase , lowercase , lowercase ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function(1_2_8 , """hello""" , """world""" )
self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] )
self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] )
def __lowerCamelCase ( self ) -> str:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(lowercase ):
raise ValueError("""Oops, we had an error!""" )
with self.assertRaises(lowercase ) as cm:
mock_training_loop_function()
self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] )
@require_cuda
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = torch.cuda.memory_allocated()
__UpperCamelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowercase )
__UpperCamelCase = release_memory(lowercase )
self.assertEqual(torch.cuda.memory_allocated() , lowercase )
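

# Illustrative usage sketch (not part of the test suite above). The decorator
# retries the wrapped function with a halved batch size each time it raises a
# CUDA out-of-memory error; the function name below is hypothetical.
@find_executable_batch_size(starting_batch_size=1_2_8)
def _example_training_loop(batch_size):
    # Pretend that anything above 16 samples per batch does not fit in memory.
    if batch_size > 1_6:
        raise RuntimeError("""CUDA out of memory.""")
    return batch_size


assert _example_training_loop() == 1_6  # 128 -> 64 -> 32 -> 16 fits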
| 349 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a__ : int = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['LayoutLMv3FeatureExtractor']
a__ : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 349 | 1 |
from __future__ import annotations
import requests
_UpperCAmelCase = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def UpperCamelCase ( __lowercase : str ,__lowercase : int = 1 ,__lowercase : str = "new" ,__lowercase : list | None = None ):
'''simple docstring'''
A_ : Tuple = wanted_data or []
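    # The walrus operator below both builds the comma-separated list of unknown
    # search terms and tests it for truthiness in a single expression.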
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
A_ : int = f'''Invalid search term: {invalid_search_terms}'''
raise ValueError(__lowercase )
A_ : Optional[int] = requests.get(
f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' ,headers={'User-agent': 'A random string'} ,)
if response.status_code == 4_29:
raise requests.HTTPError
A_ : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
A_ : Union[str, Any] = {}
for id_ in range(__lowercase ):
A_ : List[str] = {
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 192 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 192 | 1 |
def UpperCAmelCase_ ( __UpperCAmelCase : int = 1_00 ) -> int:
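    # Difference between the square of the sum and the sum of the squares of the
    # first n natural numbers, via the closed forms
    # sum(1..n) = n(n + 1) / 2 and sum(k^2, 1..n) = n(n + 1)(2n + 1) / 6.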
SCREAMING_SNAKE_CASE_ = (n * (n + 1) // 2) ** 2
SCREAMING_SNAKE_CASE_ = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 225 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_ ( __UpperCAmelCase : bytes , __UpperCAmelCase : int ) -> np.array:
SCREAMING_SNAKE_CASE_ = f"{sampling_rate}"
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = 'f32le'
SCREAMING_SNAKE_CASE_ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__UpperCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE_ = ffmpeg_process.communicate(__UpperCAmelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
SCREAMING_SNAKE_CASE_ = output_stream[0]
    SCREAMING_SNAKE_CASE_ = np.frombuffer(__UpperCAmelCase , np.float32 )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : str = "f32le" , ) -> int:
SCREAMING_SNAKE_CASE_ = f"{sampling_rate}"
SCREAMING_SNAKE_CASE_ = '1'
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE_ = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE_ = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
SCREAMING_SNAKE_CASE_ = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE_ = 'alsa'
SCREAMING_SNAKE_CASE_ = 'default'
elif system == "Darwin":
SCREAMING_SNAKE_CASE_ = 'avfoundation'
SCREAMING_SNAKE_CASE_ = ':0'
elif system == "Windows":
SCREAMING_SNAKE_CASE_ = 'dshow'
SCREAMING_SNAKE_CASE_ = 'default'
SCREAMING_SNAKE_CASE_ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ = _ffmpeg_stream(__UpperCAmelCase , __UpperCAmelCase )
for item in iterator:
yield item
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Union[Tuple[float, float], float]] = None , __UpperCAmelCase : str = "f32le" , ) -> Tuple:
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE_ = stream_chunk_s
else:
SCREAMING_SNAKE_CASE_ = chunk_length_s
SCREAMING_SNAKE_CASE_ = ffmpeg_microphone(__UpperCAmelCase , __UpperCAmelCase , format_for_conversion=__UpperCAmelCase )
if format_for_conversion == "s16le":
        SCREAMING_SNAKE_CASE_ = np.int16
SCREAMING_SNAKE_CASE_ = 2
elif format_for_conversion == "f32le":
        SCREAMING_SNAKE_CASE_ = np.float32
SCREAMING_SNAKE_CASE_ = 4
else:
raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE_ = chunk_length_s / 6
SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__UpperCAmelCase , (int, float) ):
SCREAMING_SNAKE_CASE_ = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE_ = datetime.datetime.now()
SCREAMING_SNAKE_CASE_ = datetime.timedelta(seconds=__UpperCAmelCase )
for item in chunk_bytes_iter(__UpperCAmelCase , __UpperCAmelCase , stride=(stride_left, stride_right) , stream=__UpperCAmelCase ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE_ = np.frombuffer(item['raw'] , dtype=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple[int, int] , __UpperCAmelCase : bool = False ) -> Union[str, Any]:
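    # Re-chunks a byte iterator into windows of `chunk_len` bytes that overlap
    # their neighbours by `stride` bytes on the left and right, so consumers
    # always see some context; with `stream=True`, partial chunks are also
    # yielded early and flagged with "partial": True.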
SCREAMING_SNAKE_CASE_ = b''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
SCREAMING_SNAKE_CASE_ = 0
for raw in iterator:
acc += raw
if stream and len(__UpperCAmelCase ) < chunk_len:
SCREAMING_SNAKE_CASE_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__UpperCAmelCase ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE_ = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE_ = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
SCREAMING_SNAKE_CASE_ = False
yield item
SCREAMING_SNAKE_CASE_ = stride_left
SCREAMING_SNAKE_CASE_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__UpperCAmelCase ) > stride_left:
SCREAMING_SNAKE_CASE_ = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE_ = False
yield item
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[Any]:
    SCREAMING_SNAKE_CASE_ = 2**24  # 16 MB
try:
with subprocess.Popen(__UpperCAmelCase , stdout=subprocess.PIPE , bufsize=__UpperCAmelCase ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE_ = ffmpeg_process.stdout.read(__UpperCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 225 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["BeitFeatureExtractor"]
__magic_name__ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase__ (lowerCAmelCase_ = 8 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ascii_letters + digits + punctuation
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
i -= len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = i // 3
__SCREAMING_SNAKE_CASE = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
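    # The required characters are always kept; the remaining i - len(chars_incl)
    # positions are split roughly evenly between letters, digits and punctuation,
    # with the division remainder going to the letters pool.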
__SCREAMING_SNAKE_CASE = (
chars_incl
+ random(lowerCAmelCase_ , quotient + remainder )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
+ random(lowerCAmelCase_ , lowerCAmelCase_ )
)
__SCREAMING_SNAKE_CASE = list(lowerCAmelCase_ )
shuffle(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return "".join(secrets.choice(lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
pass # Put your code here...
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 8 ):
'''simple docstring'''
if len(lowerCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
__SCREAMING_SNAKE_CASE = any(char in ascii_uppercase for char in password )
__SCREAMING_SNAKE_CASE = any(char in ascii_lowercase for char in password )
__SCREAMING_SNAKE_CASE = any(char in digits for char in password )
__SCREAMING_SNAKE_CASE = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = int(input("Please indicate the max length of your password: " ).strip() )
__SCREAMING_SNAKE_CASE = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(lowerCAmelCase_ ) )
print(
"Alternative Password generated:" , alternative_password_generator(lowerCAmelCase_ , lowerCAmelCase_ ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 54 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Any = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
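# These two lists appear to be the token ids Whisper suppresses during
# generation: non-speech tokens for the English-only and the multilingual
# vocabularies, respectively.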
lowercase : List[str] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowercase : List[Any] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : int = '''whisper'''
__A : List[Any] = ['''past_key_values''']
__A : Optional[int] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowercase=5_1865 , lowercase=80 , lowercase=6 , lowercase=4 , lowercase=6 , lowercase=4 , lowercase=1536 , lowercase=1536 , lowercase=0.0 , lowercase=0.0 , lowercase=5_0257 , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=256 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=False , lowercase=1500 , lowercase=448 , lowercase=5_0256 , lowercase=5_0256 , lowercase=5_0256 , lowercase=None , lowercase=[220, 5_0256] , lowercase=False , lowercase=256 , lowercase=False , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase=7 , **lowercase , ) -> str:
'''simple docstring'''
a__ : int = vocab_size
a__ : int = num_mel_bins
a__ : Optional[int] = d_model
a__ : List[str] = encoder_layers
a__ : Dict = encoder_attention_heads
a__ : List[str] = decoder_layers
a__ : Tuple = decoder_attention_heads
a__ : List[str] = decoder_ffn_dim
a__ : Optional[Any] = encoder_ffn_dim
a__ : Tuple = dropout
a__ : Optional[int] = attention_dropout
a__ : Any = activation_dropout
a__ : Any = activation_function
a__ : List[Any] = init_std
a__ : Optional[int] = encoder_layerdrop
a__ : Union[str, Any] = decoder_layerdrop
a__ : Tuple = use_cache
a__ : List[str] = encoder_layers
a__ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
a__ : Dict = max_source_positions
a__ : Dict = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ : Optional[int] = classifier_proj_size
a__ : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : List[Any] = apply_spec_augment
a__ : int = mask_time_prob
a__ : int = mask_time_length
a__ : List[Any] = mask_time_min_masks
a__ : str = mask_feature_prob
a__ : Optional[int] = mask_feature_length
a__ : Union[str, Any] = mask_feature_min_masks
a__ : Tuple = median_filter_width
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , suppress_tokens=lowercase , begin_suppress_tokens=lowercase , **lowercase , )
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a__ : List[str] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
a__ : Optional[Any] = {0: 'batch'}
else:
a__ : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction='inputs')
return common_inputs
def __lowercase ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 2_2050 , lowercase = 5.0 , lowercase = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = OrderedDict()
a__ : int = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase , framework=lowercase , sampling_rate=lowercase , time_duration=lowercase , frequency=lowercase , )
a__ : List[Any] = encoder_inputs['input_features'].shape[2]
a__ : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : Any = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase , lowercase , lowercase , lowercase)
a__ : List[str] = encoder_inputs.pop('input_features')
a__ : Optional[int] = decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
a__ : List[str] = decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def __lowercase ( self) -> float:
'''simple docstring'''
return 1e-3
| 99 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( __a ):
def __init__( self : Dict , _A : VQModel , _A : UNetaDModel , _A : DDIMScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_A , unet=_A , scheduler=_A )
@torch.no_grad()
def __call__( self : Tuple , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , **_A : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_A , )
UpperCAmelCase__ : Optional[int] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase__ : List[Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_A )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase__ : List[str] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase__ : Tuple = {}
if accepts_eta:
UpperCAmelCase__ : List[Any] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase__ : List[Any] = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
UpperCAmelCase__ : Tuple = self.unet(_A , _A ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : Dict = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase__ : Any = self.vqvae.decode(_A ).sample
UpperCAmelCase__ : int = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ : str = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
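

# Illustrative usage sketch: the class above implements an unconditional latent
# diffusion pipeline (a DDIM denoising loop whose latents are decoded by a
# VQ-VAE). Assuming it is exposed as diffusers' `LDMPipeline`, sampling would
# look roughly like:
#
#   pipe = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]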
| 353 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
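    # In-place selection sort: repeatedly pick the smallest element of the
    # unsorted suffix and swap it to the front (O(n^2) comparisons, O(1) extra space).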
UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
for i in range(length - 1 ):
UpperCAmelCase__ : Optional[Any] = i
for k in range(i + 1 , lowerCAmelCase__ ):
if collection[k] < collection[least]:
UpperCAmelCase__ : Dict = k
if least != i:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 299 | 0 |
def lowerCAmelCase_ ( A_):
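    # Returns h_n = n * (2n - 1) for n = 0 .. length - 1, i.e. 0, 1, 6, 15, 28, ...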
if length <= 0 or not isinstance(A_ ,A_):
raise ValueError("Length must be a positive integer.")
return [n * (2 * n - 1) for n in range(A_)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 149 |
def lowerCAmelCase_ ( A_ ,A_):
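    # Exponentiation by repeated halving of the exponent. The two identical
    # recursive calls below are evaluated separately, so this still performs
    # O(b) multiplications; caching the half value once would give O(log b).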
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(A_ ,int(b / 2)) * actual_power(A_ ,int(b / 2))
else:
return a * actual_power(A_ ,int(b / 2)) * actual_power(A_ ,int(b / 2))
def lowerCAmelCase_ ( A_ ,A_):
if b < 0:
return 1 / actual_power(A_ ,A_)
return actual_power(A_ ,A_)
if __name__ == "__main__":
print(power(-2, -3))
| 149 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "luke"
def __init__( self , snake_case__=5_0267 , snake_case__=50_0000 , snake_case__=768 , snake_case__=256 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=True , snake_case__=None , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Any = entity_vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = entity_emb_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : int = type_vocab_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = use_entity_aware_attention
_lowerCAmelCase : Union[str, Any] = classifier_dropout
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
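

# Illustrative usage sketch: the source/target language codes select which
# special tokens wrap each sequence. Assuming this class is exposed as
# transformers' `NllbTokenizerFast`:
#
#   tok = NllbTokenizerFast.from_pretrained(
#       'facebook/nllb-200-distilled-600M', src_lang='eng_Latn', tgt_lang='fra_Latn')
#   batch = tok('Hello', text_target='Bonjour', return_tensors='pt')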
| 25 | 0 |
import math
import os
import sys
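# Bit-level LZ-style compressor: the input file is read as a string of bits, a
# growing lexicon maps bit-string prefixes to codes, and every code is widened
# by one bit whenever the lexicon size reaches a power of two.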
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE_ = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE_ = f"{dat:08b}"
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase_ ( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> None:
lexicon.pop(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = last_match_id
    if math.log2(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE_ = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE_ = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = '', ''
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
SCREAMING_SNAKE_CASE_ = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = os.path.getsize(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = bin(__UpperCAmelCase )[2:]
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE_ = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE_ = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> None:
SCREAMING_SNAKE_CASE_ = read_file_binary(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = compress_data(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 225 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : str=False ) -> List[str]:
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False ) -> Any:
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ''
else:
SCREAMING_SNAKE_CASE_ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
    # The projection head is used during MSN self-supervised pre-training;
    # it is not needed for downstream tasks.
SCREAMING_SNAKE_CASE_ = [
'module.fc.fc1.weight',
'module.fc.fc1.bias',
'module.fc.bn1.weight',
'module.fc.bn1.bias',
'module.fc.bn1.running_mean',
'module.fc.bn1.running_var',
'module.fc.bn1.num_batches_tracked',
'module.fc.fc2.weight',
'module.fc.fc2.bias',
'module.fc.bn2.weight',
'module.fc.bn2.bias',
'module.fc.bn2.running_mean',
'module.fc.bn2.running_var',
'module.fc.bn2.num_batches_tracked',
'module.fc.fc3.weight',
'module.fc.fc3.bias',
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = dct.pop(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = val
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = ViTMSNConfig()
SCREAMING_SNAKE_CASE_ = 10_00
SCREAMING_SNAKE_CASE_ = 'datasets/huggingface/label-files'
SCREAMING_SNAKE_CASE_ = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase ) , 'r' ) )
SCREAMING_SNAKE_CASE_ = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 3_84
SCREAMING_SNAKE_CASE_ = 15_36
SCREAMING_SNAKE_CASE_ = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = 10_24
SCREAMING_SNAKE_CASE_ = 40_96
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = ViTMSNModel(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='cpu' )['target_encoder']
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , base_model=__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
    SCREAMING_SNAKE_CASE_ = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
SCREAMING_SNAKE_CASE_ = image_processor(images=__UpperCAmelCase , return_tensors='pt' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1E-4 )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ : Union[str, Any] = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 225 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Union[str, Any] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_lowercase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 264 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : Optional[int] = checkpoints.load_tax_checkpoint(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = flatten_dict(__SCREAMING_SNAKE_CASE )
return flax_params
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : int = {}
lowercase_ : Any = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowercase_ : Tuple = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase_ : Tuple = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase_ : Optional[Any] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase_ : Optional[Any] = new_key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase_ : List[Any] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase_ : str = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __SCREAMING_SNAKE_CASE )
lowercase_ : Dict = flax_dict[key]
lowercase_ : Any = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase_ : str = torch.from_numpy(converted_dict[key].T )
else:
lowercase_ : str = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=False ):
"""simple docstring"""
lowercase_ : List[str] = get_flax_param(__SCREAMING_SNAKE_CASE )
if not use_large:
lowercase_ : List[str] = PixaStructVisionConfig()
lowercase_ : Optional[Any] = PixaStructTextConfig()
else:
lowercase_ : Optional[int] = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowercase_ : Dict = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowercase_ : str = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = PixaStructForConditionalGeneration(__SCREAMING_SNAKE_CASE )
lowercase_ : int = rename_and_convert_flax_params(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase_ : str = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowercase_ : List[Any] = PixaStructImageProcessor()
lowercase_ : int = PixaStructProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE )
if use_large:
lowercase_ : Tuple = 4096
lowercase_ : Optional[int] = True
# mkdir if needed
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
print('''Model saved in {}'''.format(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
_lowercase : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
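# A minimal, self-contained sketch of the layer-key renaming performed by the
# regex above; the example keys are hypothetical, not taken from a real
# checkpoint.
if __name__ == "__main__":
    for demo_key in ("encoder.layers_0.attention.query", "decoder.layers_11.mlp.wi"):
        print(re.sub(r"layers_(\d+)", r"layer.\1", demo_key))
    # -> encoder.layer.0.attention.query
    # -> decoder.layer.11.mlp.wi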
| 264 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''mgp-str'''
def __init__(self , UpperCAmelCase=[3_2, 1_2_8] , UpperCAmelCase=4 , UpperCAmelCase=3 , UpperCAmelCase=2_7 , UpperCAmelCase=3_8 , UpperCAmelCase=5_0_2_5_7 , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=4.0 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=0.02 , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase )
_lowercase =image_size
_lowercase =patch_size
_lowercase =num_channels
_lowercase =max_token_length
_lowercase =num_character_labels
_lowercase =num_bpe_labels
_lowercase =num_wordpiece_labels
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =mlp_ratio
_lowercase =distilled
_lowercase =layer_norm_eps
_lowercase =drop_rate
_lowercase =qkv_bias
_lowercase =attn_drop_rate
_lowercase =drop_path_rate
_lowercase =output_aa_attentions
_lowercase =initializer_range
| 5 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
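# A self-contained check of the identity used above, log10(x**y) == y * log10(x);
# the second assertion compares 2**100 with 10**30 using only the logarithms,
# never materializing the large powers themselves.
assert math.isclose(math.log10(3**7), 7 * math.log10(3))
assert (100 * math.log10(2) > 30) == (2**100 > 10**30)  # both sides are True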
| 19 | 0 |
'''simple docstring'''
lowerCAmelCase_ = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 332 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
    # Bolzano's theorem: if equation(a) and equation(b) have opposite signs,
    # a root is guaranteed to lie between a and b
    if equation(A ) * equation(A ) >= 0:
        raise ValueError('equation(a) and equation(b) must have opposite signs' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
        # Decide which half of the interval to keep for the next iteration
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
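# Sanity check, kept as a comment since the function definitions in this
# snippet use placeholder names. equation(x) = 10 - x*x has its positive root
# at sqrt(10), roughly 3.1623, and halving [a, b] while b - a >= 0.01 pins the
# result to within about 1e-2 of it, assuming the standard interval update:
#
#     assert abs(bisection(0, 6) - 10 ** 0.5) < 0.01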
| 332 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : Dict = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
snake_case_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def A__ ( UpperCAmelCase_ ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_UpperCamelCase : Any = model_type_to_module_name(UpperCAmelCase_ )
_UpperCamelCase : List[str] = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(UpperCAmelCase_ , UpperCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(UpperCAmelCase_ , '__name__' , UpperCAmelCase_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_UpperCamelCase : List[Any] = importlib.import_module('transformers' )
if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ):
return getattr(UpperCAmelCase_ , UpperCAmelCase_ )
return None
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , **UpperCAmelCase_ , ):
_UpperCamelCase : Optional[Any] = get_file_from_repo(
UpperCAmelCase_ , UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , use_auth_token=UpperCAmelCase_ , revision=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(UpperCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(UpperCAmelCase_ )
class lowercase__ :
def __init__( self : Tuple ):
'''simple docstring'''
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(lowerCamelCase__ )
def UpperCamelCase_ ( cls : Tuple ,lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Tuple ):
'''simple docstring'''
_UpperCamelCase : int = kwargs.pop('config' ,lowerCamelCase__ )
_UpperCamelCase : List[Any] = kwargs.pop('trust_remote_code' ,lowerCamelCase__ )
_UpperCamelCase : Tuple = True
_UpperCamelCase , _UpperCamelCase : Dict = ImageProcessingMixin.get_image_processor_dict(lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : List[Any] = config_dict.get('image_processor_type' ,lowerCamelCase__ )
_UpperCamelCase : Tuple = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_UpperCamelCase : Optional[int] = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCamelCase : Optional[Any] = config_dict.pop('feature_extractor_type' ,lowerCamelCase__ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_UpperCamelCase : int = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_UpperCamelCase : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_UpperCamelCase : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = AutoConfig.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
# It could be in `config.image_processor_type``
_UpperCamelCase : List[Any] = getattr(lowerCamelCase__ ,'image_processor_type' ,lowerCamelCase__ )
if hasattr(lowerCamelCase__ ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_UpperCamelCase : List[str] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_UpperCamelCase : Union[str, Any] = image_processor_class_from_name(lowerCamelCase__ )
_UpperCamelCase : str = image_processor_auto_map is not None
_UpperCamelCase : Any = image_processor_class is not None or type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING
_UpperCamelCase : Any = resolve_trust_remote_code(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
if has_remote_code and trust_remote_code:
_UpperCamelCase : str = get_class_from_dynamic_module(
lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = kwargs.pop('code_revision' ,lowerCamelCase__ )
if os.path.isdir(lowerCamelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCamelCase__ ) in IMAGE_PROCESSOR_MAPPING:
_UpperCamelCase : int = IMAGE_PROCESSOR_MAPPING[type(lowerCamelCase__ )]
return image_processor_class.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def UpperCamelCase_ ( lowerCamelCase__ : List[str] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(lowerCamelCase__ ,lowerCamelCase__ )
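    # A hedged usage sketch of the resolution logic above (the checkpoint id
    # is just an example; any hub repo with an image processor config resolves
    # the same way, and `image` stands for a PIL image):
    #
    #     image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    #     inputs = image_processor(images=image, return_tensors="pt")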
| 83 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class lowercase__ ( lowercase ):
lowercase__ = """xlm-prophetnet"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {
"""num_attention_heads""": """num_encoder_attention_heads""",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[Union[str, Callable]] = "gelu" ,lowerCamelCase__ : Optional[int] = 30522 ,lowerCamelCase__ : Optional[int] = 1024 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[int] = 512 ,lowerCamelCase__ : Optional[float] = 0.0_2 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 2 ,lowerCamelCase__ : Optional[int] = 32 ,lowerCamelCase__ : Optional[int] = 128 ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[float] = 0.0 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : Optional[int] = 2 ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : str = encoder_ffn_dim
_UpperCamelCase : List[Any] = num_encoder_layers
_UpperCamelCase : Tuple = num_encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : List[Any] = num_decoder_layers
_UpperCamelCase : List[Any] = num_decoder_attention_heads
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : str = init_std # Normal(0, this parameter)
_UpperCamelCase : List[str] = activation_function
# parameters for xlmprophetnet
_UpperCamelCase : Tuple = ngram
_UpperCamelCase : Optional[Any] = num_buckets
_UpperCamelCase : Tuple = relative_max_distance
_UpperCamelCase : str = disable_ngram_loss
_UpperCamelCase : str = eps
# 3 Types of Dropout
_UpperCamelCase : Union[str, Any] = attention_dropout
_UpperCamelCase : str = activation_dropout
_UpperCamelCase : List[str] = dropout
_UpperCamelCase : Tuple = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,add_cross_attention=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.' )
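# A hedged usage sketch (the class name follows the real transformers API for
# this config; the values are illustrative only):
#
#     config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#     config.num_hidden_layers        # -> 12, per the read-only property above
#     config.num_hidden_layers = 24   # raises NotImplementedError by design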
| 83 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a :
'''simple docstring'''
A : str
A : str = None
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
return F"`pip install {cls.pip_package or cls.name}`"
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Tuple = '''optuna'''
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_optuna_available()
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
return run_hp_search_optuna(A, A, A, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return default_hp_space_optuna(A )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = '''ray'''
A : List[str] = '''\'ray[tune]\''''
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_ray_available()
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
return run_hp_search_ray(A, A, A, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return default_hp_space_ray(A )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''sigopt'''
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_sigopt_available()
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
return run_hp_search_sigopt(A, A, A, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return default_hp_space_sigopt(A )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = '''wandb'''
@staticmethod
def UpperCamelCase_ ( ):
'''simple docstring'''
return is_wandb_available()
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
return run_hp_search_wandb(A, A, A, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return default_hp_space_wandb(A )
UpperCamelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE : int = available_backends[0].name
if len(__UpperCamelCase ) > 1:
logger.info(
f"{len(__UpperCamelCase )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
f" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
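# A hedged sketch of how the registry above is typically used (the method
# names follow the real transformers API, `ensure_available` and
# `default_hp_search_backend`; treat the exact spelling as an assumption):
#
#     backend_name = default_hp_search_backend()  # e.g. "optuna" if installed
#     backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#     backend.ensure_available()  # raises RuntimeError with a pip hint if missing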
| 246 |
'''simple docstring'''
import math
def lowercase__( __UpperCamelCase: float ,__UpperCamelCase: float ):
"""simple docstring"""
return math.pow(__UpperCamelCase ,2 ) - a
def lowercase__( __UpperCamelCase: float ):
"""simple docstring"""
return 2 * x
def lowercase__( __UpperCamelCase: float ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = 2.0
while start <= a:
SCREAMING_SNAKE_CASE : Dict = math.pow(__UpperCamelCase ,2 )
return start
def lowercase__( __UpperCamelCase: float ,__UpperCamelCase: int = 99_99 ,__UpperCamelCase: float = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ):
"""simple docstring"""
if a < 0:
raise ValueError('math domain error' )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_initial_point(__UpperCamelCase )
for _ in range(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = value
SCREAMING_SNAKE_CASE : Dict = value - fx(__UpperCamelCase ,__UpperCamelCase ) / fx_derivative(__UpperCamelCase )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
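# The solver above is Newton's method on f(x) = x**2 - a, with the update
# x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n). A self-contained check for a = 2,
# which converges to sqrt(2) in a handful of iterations:
newton_x = 2.0
for _ in range(6):
    newton_x = newton_x - (newton_x * newton_x - 2) / (2 * newton_x)
assert abs(newton_x - 2**0.5) < 1e-12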
| 246 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__a = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Union[str, Any] = 'linear'
a :Union[str, Any] = 'cosine'
a :List[str] = 'cosine_with_restarts'
a :Dict = 'polynomial'
a :Tuple = 'constant'
a :int = 'constant_with_warmup'
a :Union[str, Any] = 'piecewise_constant'
def a ( snake_case__: Optimizer , snake_case__: int = -1 ):
'''simple docstring'''
return LambdaLR(snake_case__ , lambda snake_case__ : 1 , last_epoch=snake_case__ )
def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int = -1 ):
'''simple docstring'''
def lr_lambda(snake_case__: int ):
if current_step < num_warmup_steps:
return float(snake_case__ ) / float(max(1.0 , snake_case__ ) )
return 1.0
return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ )
def a ( snake_case__: Optimizer , snake_case__: str , snake_case__: int = -1 ):
'''simple docstring'''
lowercase_ = {}
lowercase_ = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ = rule_str.split(''':''' )
lowercase_ = int(snake_case__ )
lowercase_ = float(snake_case__ )
lowercase_ = value
lowercase_ = float(rule_list[-1] )
def create_rules_function(snake_case__: Optional[int] , snake_case__: int ):
def rule_func(snake_case__: int ) -> float:
lowercase_ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(snake_case__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ = create_rules_function(snake_case__ , snake_case__ )
return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ )
def a ( snake_case__: List[str] , snake_case__: List[Any] , snake_case__: Dict , snake_case__: int=-1 ):
'''simple docstring'''
def lr_lambda(snake_case__: int ):
if current_step < num_warmup_steps:
return float(snake_case__ ) / float(max(1 , snake_case__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(snake_case__ , snake_case__ , snake_case__ )
def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int , snake_case__: float = 0.5 , snake_case__: int = -1 ):
'''simple docstring'''
def lr_lambda(snake_case__: List[Any] ):
if current_step < num_warmup_steps:
return float(snake_case__ ) / float(max(1 , snake_case__ ) )
lowercase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(snake_case__ ) * 2.0 * progress )) )
return LambdaLR(snake_case__ , snake_case__ , snake_case__ )
def a ( snake_case__: Optimizer , snake_case__: int , snake_case__: int , snake_case__: int = 1 , snake_case__: int = -1 ):
'''simple docstring'''
def lr_lambda(snake_case__: Any ):
if current_step < num_warmup_steps:
return float(snake_case__ ) / float(max(1 , snake_case__ ) )
lowercase_ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(snake_case__ ) * progress) % 1.0) )) )
return LambdaLR(snake_case__ , snake_case__ , snake_case__ )
def a ( snake_case__: Dict , snake_case__: Dict , snake_case__: List[str] , snake_case__: Union[str, Any]=1e-7 , snake_case__: Tuple=1.0 , snake_case__: Optional[Any]=-1 ):
'''simple docstring'''
lowercase_ = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(snake_case__: int ):
if current_step < num_warmup_steps:
return float(snake_case__ ) / float(max(1 , snake_case__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ = lr_init - lr_end
lowercase_ = num_training_steps - num_warmup_steps
lowercase_ = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(snake_case__ , snake_case__ , snake_case__ )
__a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a ( snake_case__: Union[str, SchedulerType] , snake_case__: Optimizer , snake_case__: Optional[str] = None , snake_case__: Optional[int] = None , snake_case__: Optional[int] = None , snake_case__: int = 1 , snake_case__: float = 1.0 , snake_case__: int = -1 , ):
'''simple docstring'''
lowercase_ = SchedulerType(snake_case__ )
lowercase_ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(snake_case__ , last_epoch=snake_case__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(snake_case__ , step_rules=snake_case__ , last_epoch=snake_case__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(snake_case__ , num_warmup_steps=snake_case__ , last_epoch=snake_case__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , num_cycles=snake_case__ , last_epoch=snake_case__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , power=snake_case__ , last_epoch=snake_case__ , )
return schedule_func(
snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , last_epoch=snake_case__ )
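# A self-contained miniature of the warmup + cosine factor implemented above,
# wired into torch's LambdaLR the same way the factory functions do (the step
# counts are illustrative placeholders):
import torch

_demo_optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
_demo_warmup, _demo_total = 10, 100


def _demo_lr_lambda(current_step: int) -> float:
    # Linear ramp from 0 to 1 over the warmup steps, then a half-cosine decay
    # from 1 back down to 0 over the remaining steps.
    if current_step < _demo_warmup:
        return float(current_step) / float(max(1, _demo_warmup))
    progress = float(current_step - _demo_warmup) / float(max(1, _demo_total - _demo_warmup))
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))


_demo_scheduler = LambdaLR(_demo_optimizer, _demo_lr_lambda)  # lr factor: 0 -> 1 -> 0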
| 30 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A__(nn.Module ):
"""simple docstring"""
_A : int
_A : int
_A : float = 0.0
_A : int = 1
_A : int = 1
_A : bool = True
_A : bool = False
_A : bool = False
_A : bool = False
_A : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Tuple:
a_ : int = []
a_ : List[Any] = []
for i in range(self.num_layers ):
a_ : Any = self.in_channels if i == 0 else self.out_channels
a_ : List[str] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
a_ : Dict = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
a_ : List[str] = resnets
a_ : str = attentions
if self.add_downsample:
a_ : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Optional[int]:
a_ : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
a_ : Any = resnet(_lowercase , _lowercase , deterministic=_lowercase )
a_ : Any = attn(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
a_ : str = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class A__(nn.Module ):
"""simple docstring"""
_A : int
_A : int
_A : float = 0.0
_A : int = 1
_A : bool = True
_A : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Dict:
a_ : int = []
for i in range(self.num_layers ):
a_ : List[str] = self.in_channels if i == 0 else self.out_channels
a_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=_lowercase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
a_ : Tuple = resnets
if self.add_downsample:
a_ : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase=True ) -> int:
a_ : Tuple = ()
for resnet in self.resnets:
a_ : Union[str, Any] = resnet(_lowercase , _lowercase , deterministic=_lowercase )
output_states += (hidden_states,)
if self.add_downsample:
a_ : List[Any] = self.downsamplers_a(_lowercase )
output_states += (hidden_states,)
return hidden_states, output_states
class A__(nn.Module ):
"""simple docstring"""
_A : int
_A : int
_A : int
_A : float = 0.0
_A : int = 1
_A : int = 1
_A : bool = True
_A : bool = False
_A : bool = False
_A : bool = False
_A : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Any:
a_ : Dict = []
a_ : Union[str, Any] = []
for i in range(self.num_layers ):
a_ : Any = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
a_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
a_ : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
a_ : Any = resnets
a_ : Dict = attentions
if self.add_upsample:
a_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> int:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
a_ : Optional[Any] = res_hidden_states_tuple[-1]
a_ : Tuple = res_hidden_states_tuple[:-1]
a_ : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
a_ : Dict = resnet(_lowercase , _lowercase , deterministic=_lowercase )
a_ : List[str] = attn(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
a_ : str = self.upsamplers_a(_lowercase )
return hidden_states
class A__(nn.Module ):
"""simple docstring"""
_A : int
_A : int
_A : int
_A : float = 0.0
_A : int = 1
_A : bool = True
_A : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Any:
a_ : List[str] = []
for i in range(self.num_layers ):
a_ : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
a_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
a_ : Optional[int] = resnets
if self.add_upsample:
a_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> int:
for resnet in self.resnets:
# pop res hidden states
a_ : int = res_hidden_states_tuple[-1]
a_ : List[Any] = res_hidden_states_tuple[:-1]
a_ : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
a_ : str = resnet(_lowercase , _lowercase , deterministic=_lowercase )
if self.add_upsample:
a_ : Any = self.upsamplers_a(_lowercase )
return hidden_states
class A__(nn.Module ):
"""simple docstring"""
_A : int
_A : float = 0.0
_A : int = 1
_A : int = 1
_A : bool = False
_A : bool = False
_A : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> List[Any]:
# there is always at least one resnet
a_ : Optional[int] = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
a_ : Optional[Any] = []
for _ in range(self.num_layers ):
a_ : List[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_lowercase )
a_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_lowercase )
a_ : Any = resnets
a_ : Tuple = attentions
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase=True ) -> Dict:
a_ : int = self.resnets[0](_lowercase , _lowercase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
a_ : Dict = attn(_lowercase , _lowercase , deterministic=_lowercase )
a_ : str = resnet(_lowercase , _lowercase , deterministic=_lowercase )
return hidden_states
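# A self-contained illustration of the skip-connection concatenation used by
# the up blocks above. Flax uses the NHWC layout, so the channel axis is the
# last one (the shapes here are illustrative):
demo_hidden = jnp.zeros((1, 8, 8, 320))  # current hidden states
demo_skip = jnp.zeros((1, 8, 8, 320))  # popped from res_hidden_states_tuple
assert jnp.concatenate((demo_hidden, demo_skip), axis=-1).shape == (1, 8, 8, 640)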
| 248 | 0 |
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A_ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
A_ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
A_ = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ), homepage="""https://github.com/hendrycks/math""", codebase_urls=["""https://github.com/hendrycks/math"""], )
def UpperCamelCase_ ( self: str, a_: Tuple, a_: str ):
'''simple docstring'''
_snake_case : Tuple = 0.0
for i, j in zip(a_, a_ ):
n_correct += 1.0 if math_equivalence.is_equiv(a_, a_ ) else 0.0
_snake_case : Any = n_correct / len(a_ )
return {
"accuracy": accuracy,
}
| 132 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 132 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def a_ ( __snake_case : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ =checkpoints.load_tax_checkpoint(__snake_case )
lowerCamelCase_ =flatten_dict(__snake_case )
return flax_params
def a_ ( __snake_case : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ ={}
lowerCamelCase_ ={
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCamelCase_ ={
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase_ ='''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase_ =new_key.replace(__snake_case , __snake_case )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase_ =re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , __snake_case )
lowerCamelCase_ =flax_dict[key]
lowerCamelCase_ ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase_ =torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase_ =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Any=False , __snake_case : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_flax_param(__snake_case )
if not use_large:
lowerCamelCase_ =PixaStructVisionConfig()
lowerCamelCase_ =PixaStructTextConfig()
else:
lowerCamelCase_ =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase_ =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
lowerCamelCase_ =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__snake_case )
lowerCamelCase_ =PixaStructForConditionalGeneration(__snake_case )
lowerCamelCase_ =rename_and_convert_flax_params(__snake_case )
model.load_state_dict(__snake_case )
lowerCamelCase_ =AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCamelCase_ =PixaStructImageProcessor()
lowerCamelCase_ =PixaStructProcessor(image_processor=__snake_case , tokenizer=__snake_case )
if use_large:
lowerCamelCase_ =4096
lowerCamelCase_ =True
# mkdir if needed
os.makedirs(__snake_case , exist_ok=__snake_case )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
print('''Model saved in {}'''.format(__snake_case ) )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
a_ : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
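# Example invocation (the script name and paths are placeholders; the
# attribute read above must match the `--t5x_checkpoint_path` flag):
#
#     python convert_pix2struct_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --pytorch_dump_folder_path ./pix2struct-base \
#         --use_large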
| 75 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
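# A worked check of the closed form above. For 5x^2 + 6x + 1 = 0 the
# discriminant is b^2 - 4ac = 36 - 20 = 16, so the roots are (-6 +/- 4) / 10,
# i.e. -0.2 and -1.0, exactly what main() prints:
#
#     assert quadratic_roots(5, 6, 1) == (-0.2, -1.0)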
| 340 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = ["""onnx"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['onnx'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['onnx'] )
@classmethod
def snake_case ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['onnx'] ) | 364 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A = """pt"""
elif is_tf_available():
__A = """tf"""
else:
__A = """jax"""
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Dict = ByTaTokenizer
__magic_name__ :str = False
def snake_case ( self ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ :Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=2_0 , __UpperCAmelCase=5 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = []
for i in range(len(__UpperCAmelCase ) ):
try:
lowerCAmelCase__ :Union[str, Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ :str = list(filter(lambda __UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , __UpperCAmelCase ) )
lowerCAmelCase__ :Tuple = list(filter(lambda __UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCAmelCase ) , __UpperCAmelCase ) )
if max_length is not None and len(__UpperCAmelCase ) > max_length:
lowerCAmelCase__ :Optional[int] = toks[:max_length]
if min_length is not None and len(__UpperCAmelCase ) < min_length and len(__UpperCAmelCase ) > 0:
while len(__UpperCAmelCase ) < min_length:
lowerCAmelCase__ :List[str] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ :int = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ :Optional[Any] = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
if " " not in output_txt and len(__UpperCAmelCase ) > 1:
lowerCAmelCase__ :int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCAmelCase )
)
if with_prefix_space:
lowerCAmelCase__ :Dict = ' ' + output_txt
lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
return output_txt, output_ids
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.ta_base_tokenizer
lowerCAmelCase__ :str = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
lowerCAmelCase__ :Any = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.ta_base_tokenizer
lowerCAmelCase__ :int = 'Unicode €.'
lowerCAmelCase__ :Optional[int] = tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
self.assertEqual(encoded['input_ids'] , __UpperCAmelCase )
# decoding
lowerCAmelCase__ :Dict = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , 'Unicode €.</s>' )
lowerCAmelCase__ :Tuple = tokenizer('e è é ê ë' )
lowerCAmelCase__ :Any = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
self.assertEqual(encoded['input_ids'] , __UpperCAmelCase )
# decoding
lowerCAmelCase__ :List[Any] = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
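    # The ids asserted above follow ByT5's byte-level scheme: each UTF-8 byte
    # is shifted by 3 to reserve ids for pad (0), eos (1) and unk (2). For
    # example ord('U') = 85, so 'U' -> 88, and the euro sign's UTF-8 bytes
    # (0xE2, 0x82, 0xAC) = (226, 130, 172) map to (229, 133, 175).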
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ :Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase__ :Dict = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
if FRAMEWORK != "jax":
lowerCAmelCase__ :Dict = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ :Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.ta_base_tokenizer
lowerCAmelCase__ :Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __UpperCAmelCase )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , __UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.ta_base_tokenizer
lowerCAmelCase__ :Tuple = [
'Summary of the text.',
'Another summary.',
]
lowerCAmelCase__ :Union[str, Any] = tokenizer(
text_target=__UpperCAmelCase , max_length=3_2 , padding='max_length' , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ :int = ['A long paragraph for summarization. </s>']
lowerCAmelCase__ :Tuple = ['Summary of the text. </s>']
# fmt: off
lowerCAmelCase__ :Union[str, Any] = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
lowerCAmelCase__ :Dict = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
lowerCAmelCase__ :List[Any] = tokenizer(__UpperCAmelCase , text_target=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , batch['input_ids'][0] )
self.assertEqual(__UpperCAmelCase , batch['labels'][0] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase__ :List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ :int = tempfile.mkdtemp()
lowerCAmelCase__ :Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase__ :str = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
shutil.rmtree(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ :Any = tempfile.mkdtemp()
lowerCAmelCase__ :Any = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCAmelCase__ :Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase__ :Any = tokenizer.__class__.from_pretrained(__UpperCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir )
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
special_tokens_map = json.load(json_file )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
tokenizer_config = json.load(json_file )
added_tokens_extra_ids = [F"<extra_id_{i}>" for i in range(125 )]
special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(special_tokens_map , outfile )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
tmp_dir , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir , additional_special_tokens=new_added_tokens , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def test_decode_single_bytes( self ):
'''simple docstring'''
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir )
tokenizer = tokenizer_class.from_pretrained(tmp_dir )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def test_convert_tokens_to_string_format( self ):
'''simple docstring'''
tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
string = tokenizer.convert_tokens_to_string(tokens )
self.assertIsInstance(string , str )
def test_tokenizers_common_ids_setters( self ):
'''simple docstring'''
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
attributes_list = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
token_id_to_test_setters = 0
token_to_test_setters = tokenizer.convert_ids_to_tokens(
token_id_to_test_setters , skip_special_tokens=False )
for attr in attributes_list:
setattr(tokenizer , attr + '_id' , None )
self.assertEqual(getattr(tokenizer , attr ) , None )
self.assertEqual(getattr(tokenizer , attr + '_id' ) , None )
setattr(tokenizer , attr + '_id' , token_id_to_test_setters )
self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
self.assertEqual(getattr(tokenizer , attr + '_id' ) , token_id_to_test_setters )
setattr(tokenizer , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [] )
setattr(tokenizer , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 254 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionSAGPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
test_cpu_offload = False
def get_dummy_components( self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
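# Editor's sketch (not part of the original test): ``sag_scale`` sets the
# strength of Self-Attention Guidance; with ``sag_scale=0.0`` the pipeline
# should behave like plain classifier-free guidance. A hypothetical check:
#
#   inputs = self.get_dummy_inputs(torch_device)
#   inputs["sag_scale"] = 0.0
#   images = self.pipeline_class(**self.get_dummy_components())(**inputs).images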
def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_1( self ):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
sag_pipe = sag_pipe.to(torch_device )
sag_pipe.set_progress_bar_config(disable=None )
prompt = """."""
generator = torch.manual_seed(0 )
output = sag_pipe(
[prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def test_stable_diffusion_2( self ):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
sag_pipe = sag_pipe.to(torch_device )
sag_pipe.set_progress_bar_config(disable=None )
prompt = """."""
generator = torch.manual_seed(0 )
output = sag_pipe(
[prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def test_stable_diffusion_2_non_square( self ):
sag_pipe = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
sag_pipe = sag_pipe.to(torch_device )
sag_pipe.set_progress_bar_config(disable=None )
prompt = """."""
generator = torch.manual_seed(0 )
output = sag_pipe(
[prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
image = output.images
assert image.shape == (1, 512, 768, 3)
| 46 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
model_name_or_path: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
config_name: Optional[str] = field(
default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
tokenizer_name: Optional[str] = field(
default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
cache_dir: Optional[str] = field(
default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
use_fast_tokenizer: bool = field(
default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
model_revision: str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
use_auth_token: bool = field(
default=False , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class DataTrainingArguments :
train_file: Optional[str] = field(default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
validation_file: Optional[str] = field(
default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
overwrite_cache: bool = field(
default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
preprocessing_num_workers: Optional[int] = field(
default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
max_seq_length: Optional[int] = field(
default=None , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
pad_to_max_length: bool = field(
default=False , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
max_train_samples: Optional[int] = field(
default=None , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
max_eval_samples: Optional[int] = field(
default=None , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __post_init__( self ):
'''simple docstring'''
if self.train_file is not None:
extension = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice :
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__( self , features ):
'''simple docstring'''
label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
labels = [feature.pop(label_name ) for feature in features]
batch_size = len(features )
num_choices = len(features[0]['''input_ids'''] )
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
]
flattened_features = list(chain(*flattened_features ) )
batch = self.tokenizer.pad(
flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
# Add back labels
batch['''labels'''] = torch.tensor(labels , dtype=torch.int64 )
return batch
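# Editor's note (a sketch, not part of the original script): with a batch of 2
# examples of 4 choices each, ``__call__`` flattens to 8 sequences, pads them
# together, then reshapes every padded tensor back to
# (batch_size=2, num_choices=4, max_len), e.g.
#
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator(features)   # features: list of dicts, each with a "label"
#   batch["input_ids"].shape     # -> torch.Size([2, 4, max_len])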
def main():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
datasets.utils.logging.set_verbosity(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files['''train'''] = data_args.train_file
if data_args.validation_file is not None:
data_files['''validation'''] = data_args.validation_file
extension = data_args.train_file.split('''.''' )[-1]
raw_datasets = load_dataset(
extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase = [F"""ending{i}""" for i in range(4 )]
UpperCAmelCase = '''sent1'''
UpperCAmelCase = '''sent2'''
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(examples ):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
]
# Flatten out
first_sentences = list(chain(*first_sentences ) )
second_sentences = list(chain(*second_sentences ) )
# Tokenize
tokenized_examples = tokenizer(
first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
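# Shape sketch (editor's note, not in the original script): for a batch of
# B examples the tokenizer above sees 4*B (context, ending) pairs, and the
# dict comprehension regroups every key into B lists of 4 encodings each,
# which is exactly the layout DataCollatorForMultipleChoice expects.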
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
train_dataset = raw_datasets['''train''']
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
train_dataset = train_dataset.select(range(max_train_samples ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
train_dataset = train_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
eval_dataset = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
eval_dataset = eval_dataset.select(range(max_eval_samples ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
eval_dataset = eval_dataset.map(
preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(eval_predictions ):
predictions , label_ids = eval_predictions
preds = np.argmax(predictions , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
)
metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
trainer.log_metrics('''train''' , metrics )
trainer.save_metrics('''train''' , metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
trainer.log_metrics('''eval''' , metrics )
trainer.save_metrics('''eval''' , metrics )
kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
def _mp_fn(index ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 273 | 0 |
"""simple docstring"""
def or_gate(input_1 : int , input_2 : int ) -> int:
return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
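# Editor's sketch (not in the original file): by De Morgan's law the same gate
# can be built from NOT and AND, assuming the 0/1 integer convention above.
def or_gate_from_not_and(input_1 : int , input_2 : int ) -> int:
return int(not (not input_1 and not input_2 ) )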
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 355 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations : int ) -> None:
# A local function to see if a dot lands in the circle.
def is_in_circle(x : float , y : float ) -> bool:
distance_from_centre = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
proportion = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(iterations ) )
# The ratio of the area for circle to square is pi/4.
pi_estimate = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The math module value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
def area_under_curve_estimator(iterations : int , function_to_integrate : Callable[[float], float] , min_value : float = 0.0 , max_value : float = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)
def area_under_line_estimator_check(iterations : int , min_value : float = 0.0 , max_value : float = 1.0 ) -> None:
def identity_function(x : float ) -> float:
return x
estimated_value = area_under_curve_estimator(
iterations , identity_function , min_value , max_value )
expected_value = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def pi_estimator_using_area_under_curve(iterations : int ) -> None:
def function_to_integrate(x : float ) -> float:
return sqrt(4.0 - x * x )
estimated_value = area_under_curve_estimator(
iterations , function_to_integrate , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
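# Illustrative extra run (editor's sketch, not in the original file): the
# sample count is an arbitrary choice; Monte Carlo error shrinks roughly
# like 1/sqrt(n).
pi_estimator(100_000 )
area_under_line_estimator_check(100_000 )
pi_estimator_using_area_under_curve(100_000 )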
| 38 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self ) -> None:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id( self ):
"""simple docstring"""
token = '<pad>'
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
"""simple docstring"""
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1002 )
def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def test_full_tokenizer( self ):
"""simple docstring"""
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def test_save_pretrained( self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(tmpdirname2 )
# Save tokenizer rust, legacy_format=True
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it save with the same files
self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
shutil.rmtree(tmpdirname2 )
# Save tokenizer rust, legacy_format=False
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp , key ) )
shutil.rmtree(tmpdirname2 )
@cached_property
def big_tokenizer( self ) -> XLMRobertaTokenizer:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def test_picklable_without_disk( self ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SAMPLE_VOCAB , f.name )
tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
pickled_tokenizer = pickle.dumps(tokenizer )
pickle.loads(pickled_tokenizer )
def test_rust_and_python_full_tokenizers( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = 'I was born in 92000, and this is falsé.'
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def test_tokenization_base_easy_symbols( self ):
"""simple docstring"""
symbols = 'Hello World!'
original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenization_base_hard_symbols( self ):
"""simple docstring"""
symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenizer_integration( self ):
"""simple docstring"""
__magic_name__ = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model( pt_model , model_file ):
'''simple docstring'''
try:
with open(model_file , '''rb''' ) as flax_state_f:
flax_state = from_bytes(None , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(model_file ) as f:
if f.read().startswith('''version''' ):
raise OSError(
'''You seem to have cloned a repository without having git-lfs installed. Please'''
''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
''' folder you cloned.''' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
if any(is_type_bf16 ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
flax_state = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
pt_model.base_model_prefix = ''
flax_state_dict = flatten_dict(flax_state , sep='''.''' )
pt_model_dict = pt_model.state_dict()
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
flax_key_tuple_array = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
flax_tensor = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
flax_key_tuple_array[i] = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
flax_key = '''.'''.join(flax_key_tuple_array )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
missing_keys = list(missing_keys )
if len(unexpected_keys ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(missing_keys ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
return pt_model
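# Typical call pattern (editor's sketch; the model class and file name below
# are hypothetical placeholders, not from this module):
#
#   pt_model = MyDiffusersModel(config)
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")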
| 91 |
'''simple docstring'''
def greatest_common_divisor(a : int , b : int ) -> int:
'''simple docstring'''
while a != 0:
a , b = b % a, a
return b
def mod_inverse(a : int , m : int ) -> int:
'''simple docstring'''
if greatest_common_divisor(a , m ) != 1:
msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(msg )
u1 , u2 , u3 = 1, 0, a
v1 , v2 , v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
u1 , u2 , u3 , v1 , v2 , v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
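# Illustrative self-check (editor's sketch, not in the original module):
if __name__ == "__main__":
# 7 * 15 = 105 = 4 * 26 + 1, so 15 is the inverse of 7 modulo 26
assert mod_inverse(7 , 26 ) == 15
assert greatest_common_divisor(24 , 40 ) == 8
print('extended-Euclid checks passed' )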
| 91 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(extractor , '''__name__''' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('''transformers''' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_image_processor_config( pretrained_model_name_or_path: Union[str, os.PathLike] , cache_dir: Optional[Union[str, os.PathLike]] = None , force_download: bool = False , resume_download: bool = False , proxies: Optional[Dict[str, str]] = None , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , local_files_only: bool = False , **kwargs , ):
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(resolved_config_file , encoding='''utf-8''' ) as reader:
return json.load(reader )
class AutoImageProcessor :
def __init__( self ) -> None:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
config = kwargs.pop('''config''' , None )
trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
kwargs['''_from_auto'''] = True
config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
image_processor_class = config_dict.get('''image_processor_type''' , None )
image_processor_auto_map = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
feature_extractor_class = config_dict.pop('''feature_extractor_type''' , None )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
image_processor_class = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(config , PretrainedConfig ):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
# It could be in `config.image_processor_type``
image_processor_class = getattr(config , '''image_processor_type''' , None )
if hasattr(config , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
image_processor_class = image_processor_class_from_name(image_processor_class )
has_remote_code = image_processor_auto_map is not None
has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
trust_remote_code = resolve_trust_remote_code(
trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
if has_remote_code and trust_remote_code:
image_processor_class = get_class_from_dynamic_module(
image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
_ = kwargs.pop('''code_revision''' , None )
if os.path.isdir(pretrained_model_name_or_path ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(config_dict , **kwargs )
elif image_processor_class is not None:
return image_processor_class.from_dict(config_dict , **kwargs )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(config ) in IMAGE_PROCESSOR_MAPPING:
image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
return image_processor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def register( config_class , image_processor_class ) -> None:
IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
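# Typical usage (editor's sketch; the checkpoint below is an arbitrary public
# model, not referenced in this module):
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values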
| 33 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 't5'
lowerCamelCase = ['past_key_values']
lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Union[str, Any],lowercase_ : int=3_2_1_2_8,lowercase_ : int=5_1_2,lowercase_ : List[str]=6_4,lowercase_ : Tuple=2_0_4_8,lowercase_ : Any=6,lowercase_ : List[str]=None,lowercase_ : Union[str, Any]=8,lowercase_ : int=3_2,lowercase_ : Dict=1_2_8,lowercase_ : Optional[int]=0.1,lowercase_ : List[str]=1E-6,lowercase_ : Tuple=1.0,lowercase_ : Any="relu",lowercase_ : Union[str, Any]=True,lowercase_ : Optional[Any]=True,lowercase_ : int=0,lowercase_ : str=1,**lowercase_ : str,)-> Any:
'''simple docstring'''
A__ = vocab_size
A__ = d_model
A__ = d_kv
A__ = d_ff
A__ = num_layers
A__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A__ = num_heads
A__ = relative_attention_num_buckets
A__ = relative_attention_max_distance
A__ = dropout_rate
A__ = layer_norm_epsilon
A__ = initializer_factor
A__ = feed_forward_proj
A__ = use_cache
A__ = self.feed_forward_proj.split('-' )
A__ = act_info[-1]
A__ = act_info[0] == 'gated'
if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A__ = 'gelu_new'
super().__init__(
pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,)
class A ( _UpperCAmelCase ):
"""simple docstring"""
@property
def snake_case__ ( self : Tuple )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
A__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
A__ = 'past_encoder_sequence + sequence'
A__ = {0: 'batch'}
A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A__ = {0: 'batch', 1: 'decoder_sequence'}
A__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_,direction='inputs' )
return common_inputs
@property
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
return 1_3
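# Quick check (illustrative only, kept commented so the module stays import-safe):
# the `gated-gelu` back-compat branch above maps the dense activation to "gelu_new".
#
#   config = T5Config(feed_forward_proj="gated-gelu")
#   assert config.is_gated_act and config.dense_act_fn == "gelu_new"
#   config = T5Config()  # default "relu"
#   assert not config.is_gated_act and config.dense_act_fn == "relu"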
| 7 | 0 |
"""XOR cipher: encrypt/decrypt by XOR-ing every character with a key."""
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        # private default key (0 means "no default set")
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
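# Minimal runnable demo of the class above (same behaviour the commented
# tests exercise; key value is arbitrary):
if __name__ == "__main__":
    crypt = XORCipher()
    key = 67
    secret = crypt.encrypt_string("hallo welt", key)
    # XOR is an involution, so applying the same key twice round-trips
    assert crypt.decrypt_string(secret, key) == "hallo welt"
    print("round-trip ok:", crypt.decrypt_string(secret, key))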
| 332 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
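# Note (illustrative): with this pattern, importing the package is cheap;
# the heavy framework-specific submodules are only imported on first
# attribute access, e.g.
#
#   from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderConfig
#
# triggers `_LazyModule.__getattr__`, which imports the configuration
# submodule on demand.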
| 332 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampler from Karras et al. (2022), variance-expanding formulation."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
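# Sketch of the sampling loop this scheduler is designed for (variable names
# are illustrative and `model` is assumed to be a noise-prediction UNet; see
# diffusers' KarrasVePipeline for the reference implementation):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       # "churn" step: perturb the sample up to sigma_hat
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:  # second-order correction
#           model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative,
#           )
#       sample = step_output.prev_sample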
| 75 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __snake_case :
__lowerCamelCase = XGLMConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=14 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=0.0_2 , ) -> str:
'''simple docstring'''
snake_case__ : Any = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[int] = use_input_mask
snake_case__ : Any = use_labels
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : str = ffn_dim
snake_case__ : Optional[Any] = activation_function
snake_case__ : str = activation_dropout
snake_case__ : int = attention_dropout
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[str] = None
snake_case__ : List[str] = 0
snake_case__ : Optional[int] = 2
snake_case__ : Union[str, Any] = 1
def __a ( self ) -> List[str]:
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = self.get_config()
snake_case__ : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self ) -> Any:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCamelCase , )
def __a ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = TFXGLMModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , n_embd=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = TFXGLMModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __a ( self ) -> Any:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self , __UpperCamelCase=True ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ : List[str] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
snake_case__ : int = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Dict = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
snake_case__ : Any = tokenizer('Today is a nice day and' , return_tensors='tf' )
snake_case__ : Dict = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
snake_case__ : Optional[int] = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , seed=[7, 0] )
snake_case__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : str = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Any = 'left'
# use different length sentences to test batching
snake_case__ : int = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
snake_case__ : Any = tokenizer(__UpperCamelCase , return_tensors='tf' , padding=__UpperCamelCase )
snake_case__ : List[Any] = inputs['input_ids']
snake_case__ : List[str] = model.generate(input_ids=__UpperCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
snake_case__ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
snake_case__ : str = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
snake_case__ : Dict = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : List[Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Union[str, Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
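# Pattern exercised by the batching test above (sketch): with left padding,
# batched generation matches the unpadded single-sentence runs.
#
#   tokenizer.padding_side = "left"
#   inputs = tokenizer(sentences, return_tensors="tf", padding=True)
#   outputs = model.generate(input_ids=inputs["input_ids"],
#                            attention_mask=inputs["attention_mask"],
#                            max_new_tokens=12)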
| 143 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( a_ , a_ , unittest.TestCase ):
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([10] ).to(lowerCamelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE_ ( a_ , a_ , unittest.TestCase ):
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 4
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([10] ).to(lowerCamelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return (4, 32, 32)
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCamelCase_ )
UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCamelCase_ )
model.to(lowerCamelCase_ )
UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCamelCase_ )
model_accelerate.to(lowerCamelCase_ )
model_accelerate.eval()
UpperCamelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase = noise.to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase_ )
UpperCamelCase = model_accelerate(lowerCamelCase_ , lowerCamelCase_ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=lowerCamelCase_ , low_cpu_mem_usage=lowerCamelCase_ )
model_normal_load.to(lowerCamelCase_ )
model_normal_load.eval()
UpperCamelCase = model_normal_load(lowerCamelCase_ , lowerCamelCase_ )["""sample"""]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-3 )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(lowerCamelCase_ )
UpperCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase = noise.to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase_ )
with torch.no_grad():
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ ).sample
UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-3 ) )
class SCREAMING_SNAKE_CASE_ ( a_ , a_ , unittest.TestCase ):
__lowerCAmelCase = UNetaDModel
__lowerCAmelCase = "sample"
@property
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int]=(32, 32) ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowerCamelCase_ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
return (3, 32, 32)
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCamelCase_ )
UpperCamelCase = self.dummy_input
UpperCamelCase = floats_tensor((4, 3) + (256, 256) ).to(lowerCamelCase_ )
UpperCamelCase = noise
UpperCamelCase = model(**lowerCamelCase_ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(lowerCamelCase_ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (256, 256)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase_ )
with torch.no_grad():
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(lowerCamelCase_ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase_ )
with torch.no_grad():
UpperCamelCase = model(lowerCamelCase_ , lowerCamelCase_ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-2 ) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
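# Note: `UNetaDModel` above is an obfuscated spelling of diffusers'
# `UNet2DModel`. The core check every test here repeats (sketch):
#
#   model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
#   noise = torch.randn(1, model.config.in_channels,
#                       model.config.sample_size, model.config.sample_size)
#   sample = model(noise, torch.tensor([10])).sample  # same shape as `noise`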
| 350 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # disable the bar on every process except the local main one
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
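# Usage sketch (illustrative): only local rank 0 renders the bar; all other
# positional/keyword arguments are forwarded to `tqdm.auto.tqdm`.
#
#   for batch in tqdm(True, dataloader, desc="training"):
#       ...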
| 165 | 0 |
'''simple docstring'''
class _lowercase :
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Dict=None ) -> Tuple:
__lowerCAmelCase = data
__lowerCAmelCase = previous
__lowerCAmelCase = next_node
def __str__( self : Tuple ) -> str:
return f"""{self.data}"""
def a ( self : Any ) -> int:
return self.data
def a ( self : Optional[int] ) -> Dict:
return self.next
def a ( self : str ) -> Optional[int]:
return self.previous
class _lowercase :
'''simple docstring'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
__lowerCAmelCase = head
def __iter__( self : Union[str, Any] ) -> List[str]:
return self
def a ( self : List[Any] ) -> Any:
if not self.current:
raise StopIteration
else:
__lowerCAmelCase = self.current.get_data()
__lowerCAmelCase = self.current.get_next()
return value
class _lowercase :
'''simple docstring'''
def __init__( self : int ) -> str:
__lowerCAmelCase = None # First node in list
__lowerCAmelCase = None # Last node in list
def __str__( self : str ) -> Union[str, Any]:
__lowerCAmelCase = self.head
__lowerCAmelCase = []
while current is not None:
nodes.append(current.get_data() )
__lowerCAmelCase = current.get_next()
return " ".join(str(UpperCamelCase__ ) for node in nodes )
def __contains__( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
__lowerCAmelCase = self.head
while current:
if current.get_data() == value:
return True
__lowerCAmelCase = current.get_next()
return False
def __iter__( self : Optional[Any] ) -> Tuple:
return LinkedListIterator(self.head )
def a ( self : Union[str, Any] ) -> str:
if self.head:
return self.head.get_data()
return None
def a ( self : Dict ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> None:
if self.head is None:
__lowerCAmelCase = node
__lowerCAmelCase = node
else:
self.insert_before_node(self.head , UpperCamelCase__ )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> None:
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.insert_after_node(self.tail , UpperCamelCase__ )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> None:
__lowerCAmelCase = Node(UpperCamelCase__ )
if self.head is None:
self.set_head(UpperCamelCase__ )
else:
self.set_tail(UpperCamelCase__ )
def a ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
__lowerCAmelCase = node
__lowerCAmelCase = node.previous
if node.get_previous() is None:
__lowerCAmelCase = node_to_insert
else:
__lowerCAmelCase = node_to_insert
__lowerCAmelCase = node_to_insert
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> None:
__lowerCAmelCase = node
__lowerCAmelCase = node.next
if node.get_next() is None:
__lowerCAmelCase = node_to_insert
else:
__lowerCAmelCase = node_to_insert
__lowerCAmelCase = node_to_insert
def a ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any ) -> None:
__lowerCAmelCase = 1
__lowerCAmelCase = Node(UpperCamelCase__ )
__lowerCAmelCase = self.head
while node:
if current_position == position:
self.insert_before_node(UpperCamelCase__ , UpperCamelCase__ )
return
current_position += 1
__lowerCAmelCase = node.next
self.insert_after_node(self.tail , UpperCamelCase__ )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> Node:
__lowerCAmelCase = self.head
while node:
if node.get_data() == item:
return node
__lowerCAmelCase = node.get_next()
raise Exception("""Node not found""" )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
if (node := self.get_node(UpperCamelCase__ )) is not None:
if node == self.head:
__lowerCAmelCase = self.head.get_next()
if node == self.tail:
__lowerCAmelCase = self.tail.get_previous()
self.remove_node_pointers(UpperCamelCase__ )
@staticmethod
def a ( SCREAMING_SNAKE_CASE__ : Tuple ) -> None:
if node.get_next():
__lowerCAmelCase = node.previous
if node.get_previous():
__lowerCAmelCase = node.next
__lowerCAmelCase = None
__lowerCAmelCase = None
def a ( self : Optional[int] ) -> Tuple:
return self.head is None
def UpperCamelCase_ ( ) -> None:
'''simple docstring'''
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
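# Intended usage sketch (hypothetical method names, since the identifiers
# above are obfuscated; the `__lowerCAmelCase = ...` lines stand in for real
# attribute assignments such as `self.head = node`, which would need to be
# restored before this runs):
#
#   lst = LinkedList()
#   lst.insert_at_tail(1)
#   lst.insert_at_tail(2)
#   assert 2 in lst          # uses the __contains__ defined above
#   lst.remove(2)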
| 229 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f'''{solution() = }''')
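# Worked example: for n = 10 the running lcm evolves
#   1 -> 1 -> 2 -> 6 -> 12 -> 60 -> 60 -> 420 -> 840 -> 2520 -> 2520,
# so solution(10) == 2520, the classic Project Euler 5 check value.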
| 48 | 0 |
import sys
from collections import defaultdict
class _snake_case :
def __init__( self : List[str] ):
lowercase__ = []
def A__ ( self : Optional[int], __lowercase : str ):
return self.node_position[vertex]
def A__ ( self : int, __lowercase : List[Any], __lowercase : Optional[int] ):
lowercase__ = pos
def A__ ( self : Tuple, __lowercase : Dict, __lowercase : List[str], __lowercase : int, __lowercase : Optional[int] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowercase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowercase__ = 2 * start + 1
else:
lowercase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowercase__ , lowercase__ = heap[smallest_child], positions[smallest_child]
lowercase__ , lowercase__ = (
heap[start],
positions[start],
)
lowercase__ , lowercase__ = temp, tempa
lowercase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], __lowercase )
self.top_to_bottom(__lowercase, __lowercase, __lowercase, __lowercase )
def A__ ( self : int, __lowercase : Tuple, __lowercase : List[str], __lowercase : Union[str, Any], __lowercase : Any ):
lowercase__ = position[index]
while index != 0:
lowercase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowercase__ = heap[parent]
lowercase__ = position[parent]
self.set_position(position[parent], __lowercase )
else:
lowercase__ = val
lowercase__ = temp
self.set_position(__lowercase, __lowercase )
break
lowercase__ = parent
else:
lowercase__ = val
lowercase__ = temp
self.set_position(__lowercase, 0 )
def A__ ( self : Dict, __lowercase : Any, __lowercase : Tuple ):
lowercase__ = len(__lowercase ) // 2 - 1
for i in range(__lowercase, -1, -1 ):
self.top_to_bottom(__lowercase, __lowercase, len(__lowercase ), __lowercase )
def A__ ( self : Any, __lowercase : str, __lowercase : int ):
lowercase__ = positions[0]
lowercase__ = sys.maxsize
self.top_to_bottom(__lowercase, 0, len(__lowercase ), __lowercase )
return temp
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = Heap()
lowercase__ = [0] * len(SCREAMING_SNAKE_CASE_ )
lowercase__ = [-1] * len(SCREAMING_SNAKE_CASE_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowercase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowercase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE_ ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE_ )
heap.node_position.append(SCREAMING_SNAKE_CASE_ )
lowercase__ = []
lowercase__ = 1
lowercase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowercase__ = 0
lowercase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for _ in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowercase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE_ )]
):
lowercase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE_ , heap.get_position(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowercase_ = int(input("""Enter number of edges: """).strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
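# Example input/output sketch (assuming the obfuscated heap internals above
# are restored to working form): for the weighted triangle below, Prim's
# algorithm keeps the two lightest edges and skips the heaviest one.
#
#   graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       graph[u].append([v, w])
#       graph[v].append([u, w])
#   # prisms_algorithm(graph) -> [(0, 1), (1, 2)]   (total weight 3)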
| 224 |
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
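# CLI sketch (file names are placeholders): `fire` maps positional arguments
# to `pred_path`/`tgt_path` and flags to the remaining keyword arguments.
#
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json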
| 224 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = "▁"
lowerCamelCase__ = {"vocab_file": "sentencepiece.bpe.model"}
lowerCamelCase__ = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
lowerCamelCase__ = {
"facebook/xglm-564M": 2048,
}
class A__ ( _a ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , a : List[Any] , a : str="<s>" , a : Union[str, Any]="</s>" , a : str="</s>" , a : Optional[int]="<s>" , a : str="<unk>" , a : str="<pad>" , a : Any = None , **a : Any , ):
'''simple docstring'''
lowerCAmelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase__ : List[str] = 7
lowerCAmelCase__ : str = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowerCAmelCase__ : List[str] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
lowerCAmelCase__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowerCAmelCase__ : Any = len(self.sp_model )
lowerCAmelCase__ : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(a )
lowerCAmelCase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
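        # Concrete effect of the offset (ids here are illustrative): the spm
        # id for "," is 3, so the token-to-id method below returns
        # 3 + fairseq_offset == 4, and the id-to-token method maps 4 back via
        # sp_model.IdToPiece(4 - 1). Every sentencepiece id is shifted up by 1
        # so the first four slots stay reserved for <s>, <pad>, </s>, <unk>.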
def __getstate__( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.__dict__.copy()
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ : str = {}
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[Any] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _lowerCamelCase ( self : Any , a : Tuple , a : Dict = None , a : str = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a ))
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a ))
def _lowerCamelCase ( self : Optional[int] , a : List[Any] , a : Optional[Any] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self : Tuple , a : Any ):
'''simple docstring'''
return self.sp_model.encode(a , out_type=a )
def _lowerCamelCase ( self : Tuple , a : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ : Optional[Any] = self.sp_model.PieceToId(a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCamelCase ( self : List[Any] , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ''.join(a ).replace(a , ' ' ).strip()
return out_string
def _lowerCamelCase ( self : Optional[Any] , a : Dict , a : List[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase__ : Optional[Any] = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
lowerCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(a )
        return (out_vocab_file,)
| 212 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase_ : int = """ssube/stable-diffusion-x4-upscaler-onnx"""
def UpperCamelCase ( self, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(lowerCamelCase))
_lowercase : Union[str, Any] = torch.manual_seed(lowerCamelCase)
_lowercase : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[int] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : str = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inputs()
_lowercase : List[Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = self.get_dummy_inputs()
_lowercase : Union[str, Any] = pipe(**lowerCamelCase).images
_lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Dict = self.get_dummy_inputs()
_lowercase : Optional[Any] = pipe(**lowerCamelCase).images
_lowercase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
_lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = self.get_dummy_inputs()
_lowercase : List[str] = pipe(**lowerCamelCase).images
_lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = ort.SessionOptions()
_lowercase : str = False
return options
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
# using the PNDM scheduler by default
_lowercase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np', )
_lowercase : List[Any] = output.images
_lowercase : List[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : List[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
_lowercase : int = init_image.resize((1_28, 1_28))
_lowercase : str = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler')
_lowercase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A fantasy landscape, trending on artstation'
_lowercase : List[Any] = torch.manual_seed(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCamelCase, output_type='np', )
_lowercase : str = output.images
_lowercase : str = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : Union[str, Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
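# Basic pipeline call the tests above exercise (sketch; checkpoint id taken
# from the test constant, other values illustrative):
#
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#   )
#   upscaled = pipe(prompt="A fantasy landscape", image=low_res_image,
#                   num_inference_steps=10, output_type="np").images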
| 21 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def _snake_case ( lowercase__ : int , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Tuple , lowercase__ : Tuple=True ) -> str:
'''simple docstring'''
model.train()
lowerCAmelCase_ :str = model(lowercase__ )
lowerCAmelCase_ :str = F.mse_loss(lowercase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase__ )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : List[Any]=False ) -> List[Any]:
'''simple docstring'''
set_seed(4_2 )
lowerCAmelCase_ :Dict = RegressionModel()
lowerCAmelCase_ :Optional[Any] = deepcopy(lowercase__ )
lowerCAmelCase_ :Optional[int] = RegressionDataset(length=8_0 )
lowerCAmelCase_ :Tuple = DataLoader(lowercase__ , batch_size=1_6 )
model.to(accelerator.device )
if sched:
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=1E-3 )
lowerCAmelCase_ :Dict = AdamW(params=ddp_model.parameters() , lr=1E-3 )
lowerCAmelCase_ :Union[str, Any] = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 )
lowerCAmelCase_ :Optional[Any] = LambdaLR(lowercase__ , lr_lambda=lambda lowercase__ : epoch**0.65 )
# Make a copy of `model`
if sched:
lowerCAmelCase_ :Optional[int] = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
lowerCAmelCase_ :Tuple = accelerator.prepare(lowercase__ , lowercase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _snake_case ( lowercase__ : int ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = get_training_setup(lowercase__ )
# Use a single batch
lowerCAmelCase_ :Optional[Any] = next(iter(lowercase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCAmelCase_ :List[str] = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase_ :str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase__ ):
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
# Sync grads
step_model(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
lowerCAmelCase_ :int = ddp_input[torch.randperm(len(lowercase__ ) )]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
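# Minimal sketch (hypothetical helper shown only as comments, not part of the test
# suite) of the canonical `accelerator.accumulate` training-loop pattern that the
# test above exercises:
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for inputs, targets in dataloader:
#       with accelerator.accumulate(model):
#           loss = F.mse_loss(model(inputs), targets)
#           accelerator.backward(loss)  # gradients sync only on accumulation boundaries
#           optimizer.step()
#           optimizer.zero_grad()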
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 370 |
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True iff `pattern` occurs as a substring of `text` (Rabin-Karp)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
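

# Illustrative check (hypothetical helper, not part of the original module): the
# rolling-hash update used in `rabin_karp` is equivalent to rehashing every
# window of the text from scratch.
def _rolling_hash_demo(text: str = "abcde", p_len: int = 3) -> None:
    def direct_hash(window: str) -> int:
        h = 0
        for ch in window:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    rolling = direct_hash(text[:p_len])
    modulus_power = pow(alphabet_size, p_len - 1, modulus)
    for i in range(len(text) - p_len):
        # Drop the leading character, shift, and append the next character
        rolling = (
            (rolling - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
        assert rolling == direct_hash(text[i + 1 : i + 1 + p_len])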
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 1 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def a__ ( self ) -> Tuple:
_A : str = 0
@slow
def a__ ( self ) -> Tuple:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_A : Tuple = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_A : Optional[Any] = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
            self.assertIsInstance(_a , (GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(_a ) , 0 )
def a__ ( self ) -> List[str]:
_A : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Tuple:
_A : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def a__ ( self ) -> str:
_A : int = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
# Check that tokenizer_type ≠ model_type
_A : int = AutoTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" , use_fast=_a )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : str = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" , use_fast=_a )
self.assertIsInstance(_a , _a )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(_a , """vocab.txt""" ) )
_A : int = AutoTokenizer.from_pretrained(_a , tokenizer_type="""bert""" )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(_a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(_a , """merges.txt""" ) )
_A : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type="""gpt2""" )
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Tuple:
with pytest.raises(_a ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def a__ ( self ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A : List[str] = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
if isinstance(_a , _a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a )
else:
self.assertEqual(tokenizer.do_lower_case , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
_A : Dict = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def a__ ( self ) -> int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
_A : Dict = TOKENIZER_MAPPING.values()
_A : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_a )
@require_tokenizers
def a__ ( self ) -> str:
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=_a ) , _a )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , _a )
@require_tokenizers
def a__ ( self ) -> Union[str, Any]:
_A : Optional[Any] = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=_a )
_A : Optional[Any] = """Hello, world. How are you?"""
_A : List[Any] = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
_A : str = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=_a )
_A : Tuple = tokenizer.tokenize(_a )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def a__ ( self ) -> Any:
_A : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(_a ) , _a )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def a__ ( self ) -> Tuple:
_A : Optional[int] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def a__ ( self ) -> Dict:
_A : Tuple = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_a , _a )
def a__ ( self ) -> Union[str, Any]:
# Check we can load the tokenizer config of an online model.
_A : List[str] = get_tokenizer_config("""bert-base-cased""" )
_A : Any = config.pop("""_commit_hash""" , _a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_a , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_A : Optional[int] = get_tokenizer_config(_a )
self.assertDictEqual(_a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[Any] = get_tokenizer_config(_a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def a__ ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
_A : List[Any] = CustomTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : int = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> str:
try:
AutoConfig.register("""custom""" , _a )
# Can register in two steps
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_a , slow_tokenizer_class=_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter
        # for our new tokenizer and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A : str = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
_A : Optional[Any] = CustomTokenizerFast.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : Any = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_A : Dict = AutoTokenizer.from_pretrained(_a , use_fast=_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_A : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_A : int = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
_A : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : int = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
_A : List[Any] = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def a__ ( self ) -> int:
class lowercase ( UpperCamelCase__ ):
_a = False
class lowercase ( UpperCamelCase__ ):
_a = NewTokenizer
_a = False
try:
AutoConfig.register("""custom""" , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
# If remote code is not set, the default is to use local
_A : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
_A : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> List[Any]:
_A : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_A : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def a__ ( self ) -> Tuple:
with self.assertRaisesRegex(
_a , """bert-base is not a local folder and is not a valid model identifier""" ):
_A : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
def a__ ( self ) -> str:
with self.assertRaisesRegex(
_a , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_A : Union[str, Any] = AutoTokenizer.from_pretrained(_a , revision="""aaaaaa""" )
def a__ ( self ) -> str:
# Make sure we have cached the tokenizer.
_A : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_A : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
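

# Minimal usage sketch shown only as comments (directory variables are placeholders;
# it mirrors the custom-tokenizer registration pattern exercised in the tests above):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
#   tokenizer.save_pretrained(tmp_dir)
#   reloaded = AutoTokenizer.from_pretrained(tmp_dir)  # resolves to CustomTokenizer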
| 26 |
'''simple docstring'''
a_ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 52 |
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tokenizer tests with spacy and ftfy available."""

    pass
| 52 | 1 |
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the combinations of standard British coins that sum to `pence`."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
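    # Additional illustrative checks of the coin-change DP (values verified by hand):
    assert solution(1) == 1  # {1}
    assert solution(2) == 2  # {2}, {1+1}
    assert solution(5) == 4  # {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1}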
| 46 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
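

# Illustrative usage sketch shown only as comments (the dataset below is a placeholder):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"content": ["hello world"]})
#   lm_task = LanguageModeling(text_column="content")
#   ds = ds.prepare_for_task(lm_task)  # renames "content" -> "text" per `column_mapping`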
| 46 | 1 |
import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 | 367 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        """0-1 BFS: zero-weight edges go to the front of the deque, unit-weight to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
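

def _example_usage() -> None:
    # Illustrative sketch (hypothetical helper, not part of the original module):
    # in this 4-vertex graph the zero-weight edge 0 -> 2 makes the shortest
    # 0 -> 3 path cost 1 instead of 2.
    graph = AdjacencyList(4)
    graph.add_edge(0, 1, 1)
    graph.add_edge(0, 2, 0)
    graph.add_edge(2, 3, 1)
    graph.add_edge(1, 3, 1)
    assert graph.get_shortest_path(0, 3) == 1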
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 | 0 |
"""simple docstring"""
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
| 266 |
"""simple docstring"""
from __future__ import annotations


class Node:
    '''simple docstring'''

    def __init__(self, data=None):
        '''simple docstring'''
        self.data = data
        self.next = None

    def __repr__(self):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    '''simple docstring'''
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    '''simple docstring'''
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    '''simple docstring'''
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
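# For very long lists the recursive print_reverse above can exhaust Python's
# recursion limit; a hedged iterative alternative (not in the original module):
def print_reverse_iterative(head_node):
    items = []
    while head_node is not None:
        items.append(head_node.data)
        head_node = head_node.next
    for item in reversed(items):
        print(item)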
| 266 | 1 |
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """simple docstring"""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
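# Hedged usage sketch (model name and output directory are illustrative, not
# from the source): python-fire maps CLI arguments onto the function's
# parameters, so the script can be driven as
#
#   python save_randomly_initialized_version.py t5-small ./t5-small-random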
| 148 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] =parent
a__ : Tuple =batch_size
a__ : List[Any] =seq_length
a__ : Dict =is_training
a__ : Any =use_input_mask
a__ : int =use_token_type_ids
a__ : Optional[Any] =use_labels
a__ : Optional[Any] =vocab_size
a__ : List[str] =hidden_size
a__ : int =num_hidden_layers
a__ : Tuple =num_attention_heads
a__ : Union[str, Any] =intermediate_size
a__ : Optional[int] =hidden_act
a__ : int =hidden_dropout_prob
a__ : Union[str, Any] =attention_probs_dropout_prob
a__ : List[Any] =max_position_embeddings
a__ : str =type_vocab_size
a__ : Optional[Any] =type_sequence_label_size
a__ : Union[str, Any] =initializer_range
a__ : List[Any] =num_labels
a__ : str =num_choices
a__ : int =scope
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : str =None
if self.use_input_mask:
a__ : List[Any] =random_attention_mask([self.batch_size, self.seq_length] )
a__ : str =None
if self.use_token_type_ids:
a__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ : Dict =None
a__ : str =None
a__ : str =None
if self.use_labels:
a__ : List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : Dict =ids_tensor([self.batch_size] , self.num_choices )
a__ : Tuple =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : Tuple =NystromformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
a__ : str =model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
a__ : Optional[int] =model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
a__ : int =NystromformerForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Dict =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
a__ : Optional[int] =NystromformerForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : str =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ : Optional[Any] =self.num_labels
a__ : Dict =NystromformerForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[str] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
a__ : Tuple =self.num_labels
a__ : List[str] =NystromformerForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : List[Any] =self.num_choices
a__ : Optional[Any] =NystromformerForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[str] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : List[Any] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : List[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : Dict =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : List[str] =config_and_inputs
a__ : str ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : int = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_lowercase : Union[str, Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : Union[str, Any] = False
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Optional[int] =NystromformerModelTester(self )
a__ : Optional[int] =ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ : int =type
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
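        # Why index 2? With a BERT-style tokenizer the encoded sentence reads
        # "[CLS] the [MASK] of belgium is brussels [SEP]", so [MASK] sits at
        # position 2, hence `token_logits[:, 2, :]` above. A hedged, more
        # robust alternative avoids hard-coding the position:
        #   mask_index = (encoding.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]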
| 148 | 1 |
'''simple docstring'''

import math


def proth(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
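def is_proth_form(candidate: int) -> bool:
    # Hedged sanity helper (not in the original module): a Proth number has
    # the form k * 2**n + 1 with k odd and k < 2**n, so we can cross-check
    # values returned by proth() directly against the definition.
    n = 1
    while 2**n < candidate:
        k, remainder = divmod(candidate - 1, 2**n)
        if remainder == 0 and k % 2 == 1 and k < 2**n:
            return True
        n += 1
    return False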
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 53 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
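def postprocess(image: torch.Tensor) -> np.ndarray:
    # Hedged inverse of `preprocess` above (not part of the original module):
    # map the model range [-1, 1] back to [0, 1] and restore NHWC layout.
    image = (image / 2 + 0.5).clamp(0, 1)
    return image.cpu().permute(0, 2, 3, 1).numpy()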
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
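# The `accepts_eta` probe above generalizes beyond schedulers: a hedged,
# standalone sketch (not part of the pipeline) of forwarding a keyword
# argument only when the callee's signature actually accepts it.
def _filter_accepted_kwargs(fn, **kwargs):
    accepted = set(inspect.signature(fn).parameters)
    return {key: value for key, value in kwargs.items() if key in accepted}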
| 22 | 0 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_lowerCamelCase : List[str] = """scheduler_config.json"""
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    '''simple docstring'''

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
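# A hedged, minimal illustration (not part of the original module) of the
# string -> class resolution performed by `_get_compatibles` above: names are
# looked up as attributes on a module, skipping ones that do not resolve.
def _resolve_names_demo():
    math_module = importlib.import_module("math")
    resolved = [getattr(math_module, name) for name in ["sqrt", "no_such_name"] if hasattr(math_module, name)]
    return resolved  # -> [math.sqrt]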
| 351 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_lowerCamelCase : Dict = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_lowerCamelCase : Optional[Any] = {
'ctrl': 256,
}
_lowerCamelCase : List[str] = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
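# Illustrative check (would normally live in a test, not here): get_pairs
# returns the set of adjacent symbol pairs of a tokenized word.
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}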
class CTRLTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
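    # Hedged illustration of the "@@ " continuation markers produced by `bpe`
    # above: sub-word pieces are joined with "@@ " and stripped again when
    # decoding, mirroring the commented-out `decode` sketch.
    #   "hel@@ lo wor@@ ld".replace("@@ ", "")  # -> "hello world"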
| 337 | 0 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 1
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,)
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = TFViTModel(config=lowercase_ )
A__ = model(lowercase_,training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
A__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFViTForImageClassification(lowercase_ )
A__ = model(lowercase_,labels=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = TFViTForImageClassification(lowercase_ )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
A__ = TFViTModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
pass
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) )
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1],lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowercase_ )
def _snake_case( ) -> str:
'''simple docstring'''
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[Any] )-> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowercase_,return_tensors='tf' )
# forward pass
A__ = model(**lowercase_ )
# verify the logits
A__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,lowercase_ )
A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
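# Worked example of the sequence-length arithmetic the tester above relies on:
# with image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches,
# plus one [CLS] token, so seq_length == 226.
image_size, patch_size = 30, 2
assert (image_size // patch_size) ** 2 + 1 == 226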
| 7 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
"""simple docstring"""
def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 1
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,)
def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = TFViTModel(config=lowercase_ )
A__ = model(lowercase_,training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
A__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict:
'''simple docstring'''
A__ = self.type_sequence_label_size
A__ = TFViTForImageClassification(lowercase_ )
A__ = model(lowercase_,labels=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A__ = self.image_size // 2
A__ = pixel_values[:, :, :image_size, :image_size]
A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = TFViTForImageClassification(lowercase_ )
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
A__ = TFViTModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
pass
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) )
def snake_case__ ( self : int )-> List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(lowercase_ )
A__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1],lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowercase_ )
def _snake_case( ) -> str:
'''simple docstring'''
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case__ ( self : List[Any] )-> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=lowercase_,return_tensors='tf' )
# forward pass
A__ = model(**lowercase_ )
# verify the logits
A__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,lowercase_ )
A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
| 7 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
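    # The three test methods above exercise one contract across PIL, NumPy and
    # torch inputs; a hedged sketch of the per-channel normalization they all
    # assume (with mean=std=0.5, pixel value 127.5 maps to roughly 0):
    #   (127.5 / 255.0 - 0.5) / 0.5 == 0.0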
| 257 |
__UpperCAmelCase = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 257 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
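def _squareplus_demo() -> None:
    # Hedged numeric check (not part of the original module) of the squareplus
    # map (x + sqrt(x^2 + 4)) / 2 used in the domain maps above: it is strictly
    # positive everywhere and approaches x for large x, making it a smooth
    # alternative to softplus for positivity constraints.
    x = torch.tensor([-5.0, 0.0, 5.0])
    y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
    assert bool((y > 0).all()) and torch.isclose(y[1], torch.tensor(1.0))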
| 146 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
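    # Hedged usage sketch: this builder backs `load_dataset("pandas", ...)`,
    # which reads pickled DataFrames (see `pd.read_pickle` above); the file
    # name below is illustrative only.
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("pandas", data_files="frames.pkl", split="train")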
| 229 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """detr"""
_lowerCAmelCase = ["""past_key_values"""]
_lowerCAmelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=1_00 , __magic_name__=6 , __magic_name__=20_48 , __magic_name__=8 , __magic_name__=6 , __magic_name__=20_48 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_56 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0_2 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=1 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.1 , **__magic_name__ , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__magic_name__ , __magic_name__ ):
_a = backbone_config.get('model_type' )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__magic_name__ )
# set timm attributes to None
_a , _a , _a = None, None, None
_a = use_timm_backbone
_a = backbone_config
_a = num_channels
_a = num_queries
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = decoder_layerdrop
_a = encoder_layers
_a = auxiliary_loss
_a = position_embedding_type
_a = backbone
_a = use_pretrained_backbone
_a = dilation
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __UpperCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __UpperCAmelCase ( cls , __magic_name__ , **__magic_name__ ) -> Any:
return cls(backbone_config=__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict[str, any]:
_a = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = version.parse("""1.11""" )
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-5
@property
def __UpperCAmelCase ( self ) -> int:
return 12
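# Hedged usage sketch: the configuration above mirrors transformers.DetrConfig,
# so the attribute_map routing can be checked with the library class directly.
from transformers import DetrConfig

cfg = DetrConfig(d_model=256, encoder_attention_heads=8)
assert cfg.hidden_size == 256        # routed to d_model via attribute_map
assert cfg.num_attention_heads == 8  # routed to encoder_attention_heads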
| 104 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    '''simple docstring'''
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__).release < version.parse('''0.11.0''').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='''dataset''', revision=revision)
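# Hedged usage sketch of the helper above; the repo id and file path are
# placeholders used only for illustration.
url = hf_hub_url("user/my-dataset", "data/train-00000.parquet", revision="main")
print(url)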
| 279 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase (lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : int = DebertaVaTokenizer
lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Tuple = True
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = '''this is a test'''
lowercase__ = '''this is a test'''
return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''<pad>'''
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(UpperCamelCase ) , 30001 )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
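# Hedged usage sketch of the tokenizer family under test; the checkpoint name
# is taken from the integration test above.
from transformers import DebertaV2Tokenizer

tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
print(tok.tokenize("I was born in 92000, and this is falsé."))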
| 2 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = ['''pixel_values''']
def __init__( self , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = True , lowerCamelCase__ = 1 / 255 , lowerCamelCase__ = True , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = True , **lowerCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
__lowerCamelCase = size if size is not None else {'shortest_edge': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
__lowerCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name='crop_size' )
__lowerCamelCase = do_resize
__lowerCamelCase = size
__lowerCamelCase = resample
__lowerCamelCase = do_center_crop
__lowerCamelCase = crop_size
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_normalize
__lowerCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowerCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__lowerCamelCase = do_convert_rgb
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = PILImageResampling.BICUBIC , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCamelCase = get_resize_output_image_size(lowerCamelCase__ , size=size['shortest_edge'] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
__lowerCamelCase = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size['height'], size['width']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[Any]:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = ChannelDimension.FIRST , **lowerCamelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
__lowerCamelCase = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase = size if size is not None else self.size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='size' , default_to_square=lowerCamelCase__ )
__lowerCamelCase = resample if resample is not None else self.resample
__lowerCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase = get_size_dict(lowerCamelCase__ , param_name='crop_size' , default_to_square=lowerCamelCase__ )
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase = image_std if image_std is not None else self.image_std
__lowerCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCamelCase = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCamelCase = [convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
__lowerCamelCase = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
__lowerCamelCase = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
__lowerCamelCase = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
__lowerCamelCase = {'pixel_values': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
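# Hedged usage sketch: the pipeline above matches transformers'
# CLIPImageProcessor (resize shortest edge, center crop, rescale, normalize),
# so it can be exercised with the library class.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = Image.new("RGB", (640, 480))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)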
| 348 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__A = logging.get_logger(__name__)
__A = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
__A = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = '''whisper'''
snake_case_ = ['''past_key_values''']
snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = vocab_size
__lowerCamelCase = num_mel_bins
__lowerCamelCase = d_model
__lowerCamelCase = encoder_layers
__lowerCamelCase = encoder_attention_heads
__lowerCamelCase = decoder_layers
__lowerCamelCase = decoder_attention_heads
__lowerCamelCase = decoder_ffn_dim
__lowerCamelCase = encoder_ffn_dim
__lowerCamelCase = dropout
__lowerCamelCase = attention_dropout
__lowerCamelCase = activation_dropout
__lowerCamelCase = activation_function
__lowerCamelCase = init_std
__lowerCamelCase = encoder_layerdrop
__lowerCamelCase = decoder_layerdrop
__lowerCamelCase = use_cache
__lowerCamelCase = encoder_layers
__lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase = max_source_positions
__lowerCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCamelCase = classifier_proj_size
__lowerCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCamelCase = apply_spec_augment
__lowerCamelCase = mask_time_prob
__lowerCamelCase = mask_time_length
__lowerCamelCase = mask_time_min_masks
__lowerCamelCase = mask_feature_prob
__lowerCamelCase = mask_feature_length
__lowerCamelCase = mask_feature_min_masks
__lowerCamelCase = median_filter_width
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCamelCase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCamelCase = {0: 'batch'}
else:
__lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' )
return common_inputs
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowerCamelCase = OrderedDict()
__lowerCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , )
__lowerCamelCase = encoder_inputs['input_features'].shape[2]
__lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase = encoder_inputs.pop('input_features' )
__lowerCamelCase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
__lowerCamelCase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def lowercase_ ( self ) -> float:
'''simple docstring'''
return 1e-3
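# Hedged usage sketch: the configuration above mirrors transformers.WhisperConfig.
from transformers import WhisperConfig

cfg = WhisperConfig(d_model=384, encoder_attention_heads=6)
assert cfg.hidden_size == 384        # attribute_map routes hidden_size to d_model
assert cfg.num_attention_heads == 6  # routed to encoder_attention_heads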
| 348 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Dict=13 , UpperCamelCase__: int=30 , UpperCamelCase__: Optional[int]=2 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Any=True , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[int]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: Tuple=4 , UpperCamelCase__: Optional[Any]=37 , UpperCamelCase__: List[Any]="gelu" , UpperCamelCase__: Union[str, Any]=0.1 , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Tuple=10 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: str=0.6 , UpperCamelCase__: str=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Optional[Any] = is_training
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Any = num_attention_heads
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Any = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : List[str] = mask_ratio
lowerCamelCase__ : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase__ : Any = (image_size // patch_size) ** 2
lowerCamelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : List[Any] = None
if self.use_labels:
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Any ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict , UpperCamelCase__: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel(config=UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , training=UpperCamelCase__ )
# expected sequence length = num_patches
lowerCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowerCamelCase__ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase__ : List[Any] = 1
lowerCamelCase__ : Union[str, Any] = TFViTMAEForPreTraining(UpperCamelCase__ )
lowerCamelCase__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase__ : int = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
((lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) : Optional[int] = config_and_inputs
lowerCamelCase__ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a = False
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = TFViTMAEModelTester(self )
lowerCamelCase__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def lowerCamelCase_ ( self: List[str] ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : int = copy.deepcopy(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase__ : List[str] = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : str = outputs_dict[0].numpy()
lowerCamelCase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def lowerCamelCase_ ( self: Dict ):
# make the mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCamelCase__: int ):
lowerCamelCase__ : Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCamelCase__ ):
lowerCamelCase__ : List[str] = v.numpy()
else:
lowerCamelCase__ : Union[str, Any] = np.array(UpperCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : str = prepare_numpy_arrays(UpperCamelCase__ )
lowerCamelCase__ : int = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : Any = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: str ):
# make masks reproducible
np.random.seed(2 )
lowerCamelCase__ : List[str] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCamelCase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : Optional[int] = tf.constant(UpperCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase__ : Tuple = tf_noise
super().check_pt_tf_models(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : List[Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCamelCase__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCamelCase__ , UpperCamelCase__ ),)
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCamelCase__ , """_keras_serializable""" , UpperCamelCase__ )
}
lowerCamelCase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase__ : List[str] = tf.convert_to_tensor(UpperCamelCase__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCamelCase__ : List[str] = main_layer_class(UpperCamelCase__ )
lowerCamelCase__ : int = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCamelCase__ : List[str] = tf.keras.Model(UpperCamelCase__ , outputs=main_layer(UpperCamelCase__ ) )
lowerCamelCase__ : Union[str, Any] = model(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__ : int = os.path.join(UpperCamelCase__ , """keras_model.h5""" )
model.save(UpperCamelCase__ )
lowerCamelCase__ : int = tf.keras.models.load_model(
UpperCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCamelCase__ , tf.keras.Model )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Tuple = model_class(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : Any = outputs.last_hidden_state.numpy()
lowerCamelCase__ : List[str] = 0
else:
lowerCamelCase__ : int = outputs.logits.numpy()
lowerCamelCase__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ , saved_model=UpperCamelCase__ )
lowerCamelCase__ : Dict = model_class.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , noise=UpperCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCamelCase__ : str = after_outputs["""last_hidden_state"""].numpy()
lowerCamelCase__ : Optional[Any] = 0
else:
lowerCamelCase__ : Union[str, Any] = after_outputs["""logits"""].numpy()
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase__ , 1e-5 )
def lowerCamelCase_ ( self: Any ):
# make mask reproducible
np.random.seed(2 )
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCamelCase__ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : List[str] = model(UpperCamelCase__ , noise=UpperCamelCase__ )
lowerCamelCase__ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCamelCase__ )
lowerCamelCase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCamelCase__ : int = model_class.from_config(model.config )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCamelCase__ : List[Any] = new_model(UpperCamelCase__ , noise=UpperCamelCase__ )
self.assert_outputs_same(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self: List[str] ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
@slow
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Optional[Any] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: List[str] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase__ : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
lowerCamelCase__ : Dict = self.default_image_processor
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase__ : Tuple = ViTMAEConfig()
lowerCamelCase__ : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase__ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCamelCase__ : str = model(**UpperCamelCase__ , noise=UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : Any = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : str = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
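# Hedged sketch of the reproducibility trick these tests rely on: ViTMAE masks
# patches at random, so the tests fix a per-patch noise array and pass it to
# every forward call via the `noise` argument.
import numpy as np

np.random.seed(2)
num_patches = (224 // 16) ** 2  # (image_size // patch_size) ** 2
noise = np.random.uniform(size=(1, num_patches))
# model(**inputs, noise=noise) then produces the same mask on every call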
| 41 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal, ) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
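# Hedged usage sketch on a tiny binary grid (1 = walkable, 0 = blocked):
grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
print(dist, path)  # 4.0 [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]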
| 41 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = hidden_states.shape
SCREAMING_SNAKE_CASE_ = jax.image.resize(
_A , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
SCREAMING_SNAKE_CASE_ = self.conv(_A )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _A ) -> Optional[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
SCREAMING_SNAKE_CASE_ = self.conv(_A )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =None
UpperCAmelCase_ =0.0
UpperCAmelCase_ =None
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.in_channels if self.out_channels is None else self.out_channels
SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Dense(_A , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE_ = nn.Dropout(self.dropout_prob )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
SCREAMING_SNAKE_CASE_ = None
if use_nin_shortcut:
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self , _A , _A , _A=True ) -> str:
SCREAMING_SNAKE_CASE_ = hidden_states
SCREAMING_SNAKE_CASE_ = self.norma(_A )
SCREAMING_SNAKE_CASE_ = nn.swish(_A )
SCREAMING_SNAKE_CASE_ = self.conva(_A )
SCREAMING_SNAKE_CASE_ = self.time_emb_proj(nn.swish(_A ) )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(jnp.expand_dims(_A , 1 ) , 1 )
SCREAMING_SNAKE_CASE_ = hidden_states + temb
SCREAMING_SNAKE_CASE_ = self.norma(_A )
SCREAMING_SNAKE_CASE_ = nn.swish(_A )
SCREAMING_SNAKE_CASE_ = self.dropout(_A , _A )
SCREAMING_SNAKE_CASE_ = self.conva(_A )
if self.conv_shortcut is not None:
SCREAMING_SNAKE_CASE_ = self.conv_shortcut(_A )
return hidden_states + residual
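# Hedged sketch of the upsampling step the first module above performs:
# a nearest-neighbour resize to twice the spatial size, in NHWC layout.
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 32))
up = jax.image.resize(x, shape=(1, 16, 16, 32), method="nearest")
print(up.shape)  # (1, 16, 16, 32)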
| 257 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__UpperCAmelCase = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__UpperCAmelCase = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    """BLEU metric, delegating the score computation to the reference compute_bleu implementation."""

    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False) -> dict:
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 257 | 1 |
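For readers who want to see what compute_bleu is doing under the hood, here is a minimal, self-contained sketch of corpus BLEU without smoothing. The helper names (ngrams, simple_bleu) are hypothetical, and the shortest-reference convention for the reference length follows the tensorflow/nmt implementation linked above; under those assumptions it reproduces the 1.0 score from the docstring example.

import math
from collections import Counter


def ngrams(tokens, n):
    # Multiset of all n-grams in a token list.
    return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))


def simple_bleu(predictions, references, max_order=4):
    matches = [0] * max_order
    possible = [0] * max_order
    pred_len = 0
    ref_len = 0
    for pred, refs in zip(predictions, references):
        pred_len += len(pred)
        ref_len += min(len(r) for r in refs)  # shortest reference, as in tensorflow/nmt
        for n in range(1, max_order + 1):
            pred_counts = ngrams(pred, n)
            max_ref_counts = Counter()
            for ref in refs:
                max_ref_counts |= ngrams(ref, n)  # elementwise max over all references
            matches[n - 1] += sum((pred_counts & max_ref_counts).values())  # clipped counts
            possible[n - 1] += max(len(pred) - n + 1, 0)
    precisions = [m / p if p > 0 else 0.0 for m, p in zip(matches, possible)]
    if min(precisions) > 0:
        geo_mean = math.exp(sum(math.log(p) for p in precisions) / max_order)
    else:
        geo_mean = 0.0
    brevity_penalty = 1.0 if pred_len > ref_len else math.exp(1 - ref_len / max(pred_len, 1))
    return geo_mean * brevity_penalty


predictions = [["hello", "there", "general", "kenobi"], ["foo", "bar", "foobar"]]
references = [
    [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],
    [["foo", "bar", "foobar"]],
]
print(simple_bleu(predictions, references))  # 1.0 — each prediction matches a reference exactly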
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated right by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 217 |
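As a quick cross-check of the rotation-matrix construction above (using the CircularConvolution name as edited, and the same default signals), circular convolution can equivalently be computed through the FFT, since pointwise multiplication of spectra corresponds to circular convolution in time:

import numpy as np

first_signal = [2, 1, 2, -1]
second_signal = [1, 2, 3, 4]

# circular convolution via the convolution theorem
fft_result = np.fft.ifft(np.fft.fft(first_signal) * np.fft.fft(second_signal)).real
print([round(float(x), 2) for x in fft_result])  # [10.0, 10.0, 6.0, 14.0], same as the matrix method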
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks whether each process has finished: 0 = not yet executed, 1 = executed.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process; if the CPU is idle, jump to its arrival.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Highest response ratio seen so far in this pass.
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
| 111 | 0 |
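The selection rule driving the scheduler loop above is the response ratio (burst_time + waiting_time) / burst_time. A tiny standalone illustration (the function name here is hypothetical, not part of the module) shows why HRRN favors short jobs while still aging long waiters so they cannot starve:

def response_ratio(current_time: int, arrival: int, burst: int) -> float:
    # (burst_time + waiting_time) / burst_time, as computed inside the scheduler loop
    return (burst + (current_time - arrival)) / burst

# At t=5, a 1-unit job that just arrived beats a 10-unit job waiting since t=0...
print(response_ratio(5, 4, 1))   # 2.0
print(response_ratio(5, 0, 10))  # 1.5
# ...but the long job's ratio keeps growing while it waits, so it is eventually picked.
print(response_ratio(20, 0, 10))  # 3.0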
'''Benchmarking the library on inference and training in TensorFlow.'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml


logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, Optional[MemorySummary]]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 67 |
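The core trick in run_with_tf_optimizations above is deciding at decoration time whether a callable runs eagerly or as a compiled tf.function. Here is a minimal sketch of that pattern, assuming only that TensorFlow 2.x is installed; the names (maybe_compile, square_sum) are illustrative, not part of the benchmark API:

from functools import wraps

import tensorflow as tf


def maybe_compile(use_graph: bool):
    # Return a decorator that either leaves the function eager or wraps it in tf.function.
    def decorator(func):
        @wraps(func)
        def eager(*args, **kwargs):
            return func(*args, **kwargs)

        return tf.function(func) if use_graph else eager

    return decorator


@maybe_compile(use_graph=True)
def square_sum(x):
    return tf.reduce_sum(x * x)


print(square_sum(tf.constant([1.0, 2.0, 3.0])))  # tf.Tensor(14.0, shape=(), dtype=float32)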
'''Return the complement of a DNA strand.'''
import re
def dna_complement(dna: str) -> str:
    """
    Return the complementary DNA strand (A<->T, C<->G).

    >>> dna_complement("ATCG")
    'TAGC'
    >>> dna_complement("GTAT")
    'CATA'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 | 1 |
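A quick sanity check on the helper (using the dna_complement name introduced above): complementing twice returns the original strand.

strand = "ATCGATCG"
assert dna_complement(dna_complement(strand)) == strand
print(dna_complement(strand))  # TAGCTAGC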